var/home/core/zuul-output/logs/kubelet.log
Oct 11 03:55:03 crc systemd[1]: Starting Kubernetes Kubelet...
Oct 11 03:55:03 crc restorecon[4797]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 11 03:55:03 crc restorecon[4797]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:03 crc restorecon[4797]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 03:55:03 crc 
restorecon[4797]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 03:55:03 crc restorecon[4797]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 03:55:03 crc restorecon[4797]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 03:55:03 crc 
restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:03 crc restorecon[4797]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:03 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 03:55:04 crc restorecon[4797]: 
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 
03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc 
restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 03:55:04 crc restorecon[4797]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 03:55:04 crc restorecon[4797]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Oct 11 03:55:05 crc kubenswrapper[4798]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Oct 11 03:55:05 crc kubenswrapper[4798]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Oct 11 03:55:05 crc kubenswrapper[4798]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Oct 11 03:55:05 crc kubenswrapper[4798]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
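[editor's note] The long run of restorecon "not reset as customized by admin" messages above is expected behavior rather than an error: container_file_t is one of the SELinux policy's customizable types, and restorecon skips files whose current type is customizable unless a forced relabel is requested. A minimal sketch of how to inspect and override this on a Fedora/RHEL-family host (paths assume the targeted policy; the customizable_types file and the -F flag are standard policycoreutils/libselinux behavior, but verify against the host's policy before relying on this):

    # List the types restorecon treats as admin-customized
    # (container_file_t should appear here on a targeted-policy host)
    cat /etc/selinux/targeted/contexts/customizable_types

    # Default relabel: customizable types are skipped and logged as
    # "not reset as customized by admin", as seen throughout this log
    restorecon -rv /var/lib/kubelet

    # Forced relabel: -F resets even customizable types to the policy default
    restorecon -rvF /var/lib/kubelet

Forcing a relabel under /var/lib/kubelet would likely strip the per-pod MCS categories (for example s0:c7,c13) that the container runtime applied, so the skip-by-default behavior recorded here is what preserves pod file isolation across the restart.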
Oct 11 03:55:05 crc kubenswrapper[4798]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Oct 11 03:55:05 crc kubenswrapper[4798]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.196147 4798 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204736 4798 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204770 4798 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204782 4798 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204792 4798 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204801 4798 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204810 4798 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204819 4798 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204828 4798 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204838 4798 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
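[editor's note] The Flag deprecation warnings and the feature_gate messages above both trace back to the kubelet config file that the deprecation text points at. A minimal sketch of a KubeletConfiguration that would replace the deprecated flags seen here (the field names are real kubelet.config.k8s.io/v1beta1 fields; the values, including the CRI-O socket path, the taint, and the reserved resource sizes, are illustrative assumptions, not the values this node actually uses):

    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    # replaces --container-runtime-endpoint (socket path is an assumption)
    containerRuntimeEndpoint: unix:///var/run/crio/crio.sock
    # replaces --volume-plugin-dir
    volumePluginDir: /etc/kubernetes/kubelet-plugins/volume/exec
    # replaces --register-with-taints
    registerWithTaints:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
    # replaces --system-reserved (sizes are placeholders)
    systemReserved:
      cpu: 500m
      memory: 1Gi
    # gates set here produce the feature_gate.go messages at startup
    featureGates:
      KMSv1: true
      ValidatingAdmissionPolicy: true

The kubelet is then started with --config pointing at this file. The "unrecognized feature gate" warnings are consistent with OpenShift propagating cluster-level gates (NetworkSegmentation, OnClusterBuild, and so on) into a config that the upstream kubelet gate registry does not know about; feature_gate.go only warns on unknown names and continues, so the lines that follow are noise rather than failures.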
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204849 4798 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204858 4798 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204866 4798 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204874 4798 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204882 4798 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204890 4798 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204899 4798 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204907 4798 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204915 4798 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204922 4798 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204930 4798 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204938 4798 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204946 4798 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204954 4798 feature_gate.go:330] unrecognized feature gate: GatewayAPI Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204963 4798 feature_gate.go:330] unrecognized feature gate: Example Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204971 4798 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204979 4798 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204986 4798 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.204994 4798 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205003 4798 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205011 4798 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205019 4798 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205027 4798 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205035 4798 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205042 4798 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205049 4798 
feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205057 4798 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205064 4798 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205072 4798 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205081 4798 feature_gate.go:330] unrecognized feature gate: OVNObservability Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205089 4798 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205096 4798 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205103 4798 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205111 4798 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205119 4798 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205127 4798 feature_gate.go:330] unrecognized feature gate: PlatformOperators Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205134 4798 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205145 4798 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205155 4798 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205164 4798 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205173 4798 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205182 4798 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205191 4798 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205199 4798 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205207 4798 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205216 4798 feature_gate.go:330] unrecognized feature gate: InsightsConfig Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205224 4798 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205231 4798 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205239 4798 feature_gate.go:330] unrecognized feature gate: PinnedImages Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205246 4798 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205254 4798 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Oct 11 03:55:05 crc kubenswrapper[4798]: 
W1011 03:55:05.205262 4798 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205269 4798 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205277 4798 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205284 4798 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205292 4798 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205300 4798 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205308 4798 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205319 4798 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205329 4798 feature_gate.go:330] unrecognized feature gate: NewOLM Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205338 4798 feature_gate.go:330] unrecognized feature gate: SignatureStores Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.205346 4798 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205544 4798 flags.go:64] FLAG: --address="0.0.0.0" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205569 4798 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205587 4798 flags.go:64] FLAG: --anonymous-auth="true" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205600 4798 flags.go:64] FLAG: --application-metrics-count-limit="100" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205612 4798 flags.go:64] FLAG: --authentication-token-webhook="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205621 4798 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205633 4798 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205644 4798 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205654 4798 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205663 4798 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205672 4798 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205682 4798 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205691 4798 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205700 4798 flags.go:64] FLAG: --cgroup-root="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205709 4798 flags.go:64] FLAG: --cgroups-per-qos="true" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205718 4798 flags.go:64] FLAG: --client-ca-file="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205727 4798 flags.go:64] FLAG: 
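Every feature_gate.go:330 line above is the same event: the cluster-wide gate list handed to this process includes OpenShift-level gates that the kubelet does not register, and unknown names are warned about and skipped rather than treated as fatal (which is why startup proceeds). A hypothetical Go sketch of that warn-and-skip pattern; the names and structure here are illustrative, not the kubelet's actual implementation:

// Hypothetical sketch of warn-and-skip feature-gate handling.
package main

import "log"

// known mirrors the gates this component ships with (a tiny subset).
var known = map[string]bool{
	"CloudDualStackNodeIPs":                  true,
	"DisableKubeletCloudCredentialProviders": true,
	"KMSv1":                                  true,
}

// apply sets recognized gates and warns on unrecognized ones instead of
// failing, so a cluster-wide gate list aimed at many components produces
// long runs of warnings in any single component's log.
func apply(requested, gates map[string]bool) {
	for name, enabled := range requested {
		if _, ok := known[name]; !ok {
			log.Printf("W unrecognized feature gate: %s", name)
			continue
		}
		gates[name] = enabled
	}
}

func main() {
	gates := map[string]bool{}
	apply(map[string]bool{
		"KMSv1":             true,  // recognized (deprecated upstream)
		"GatewayAPI":        true,  // cluster-level gate: warned, skipped
		"ManagedBootImages": false, // likewise unknown to this component
	}, gates)
	log.Printf("effective gates: %v", gates)
}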
--cloud-config="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205736 4798 flags.go:64] FLAG: --cloud-provider="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205744 4798 flags.go:64] FLAG: --cluster-dns="[]" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205754 4798 flags.go:64] FLAG: --cluster-domain="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205763 4798 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205773 4798 flags.go:64] FLAG: --config-dir="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205781 4798 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205791 4798 flags.go:64] FLAG: --container-log-max-files="5" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205805 4798 flags.go:64] FLAG: --container-log-max-size="10Mi" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205816 4798 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205828 4798 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205841 4798 flags.go:64] FLAG: --containerd-namespace="k8s.io" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205852 4798 flags.go:64] FLAG: --contention-profiling="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205864 4798 flags.go:64] FLAG: --cpu-cfs-quota="true" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205877 4798 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205889 4798 flags.go:64] FLAG: --cpu-manager-policy="none" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205900 4798 flags.go:64] FLAG: --cpu-manager-policy-options="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205914 4798 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205926 4798 flags.go:64] FLAG: --enable-controller-attach-detach="true" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205937 4798 flags.go:64] FLAG: --enable-debugging-handlers="true" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205947 4798 flags.go:64] FLAG: --enable-load-reader="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205959 4798 flags.go:64] FLAG: --enable-server="true" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205970 4798 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205984 4798 flags.go:64] FLAG: --event-burst="100" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.205996 4798 flags.go:64] FLAG: --event-qps="50" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206008 4798 flags.go:64] FLAG: --event-storage-age-limit="default=0" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206019 4798 flags.go:64] FLAG: --event-storage-event-limit="default=0" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206031 4798 flags.go:64] FLAG: --eviction-hard="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206044 4798 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206056 4798 flags.go:64] FLAG: --eviction-minimum-reclaim="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206066 4798 flags.go:64] FLAG: 
--eviction-pressure-transition-period="5m0s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206081 4798 flags.go:64] FLAG: --eviction-soft="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206093 4798 flags.go:64] FLAG: --eviction-soft-grace-period="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206103 4798 flags.go:64] FLAG: --exit-on-lock-contention="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206115 4798 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206126 4798 flags.go:64] FLAG: --experimental-mounter-path="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206137 4798 flags.go:64] FLAG: --fail-cgroupv1="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206148 4798 flags.go:64] FLAG: --fail-swap-on="true" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206158 4798 flags.go:64] FLAG: --feature-gates="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206172 4798 flags.go:64] FLAG: --file-check-frequency="20s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206183 4798 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206194 4798 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206206 4798 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206218 4798 flags.go:64] FLAG: --healthz-port="10248" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206229 4798 flags.go:64] FLAG: --help="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206239 4798 flags.go:64] FLAG: --hostname-override="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206248 4798 flags.go:64] FLAG: --housekeeping-interval="10s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206257 4798 flags.go:64] FLAG: --http-check-frequency="20s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206267 4798 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206277 4798 flags.go:64] FLAG: --image-credential-provider-config="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206287 4798 flags.go:64] FLAG: --image-gc-high-threshold="85" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206297 4798 flags.go:64] FLAG: --image-gc-low-threshold="80" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206306 4798 flags.go:64] FLAG: --image-service-endpoint="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206314 4798 flags.go:64] FLAG: --kernel-memcg-notification="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206323 4798 flags.go:64] FLAG: --kube-api-burst="100" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206332 4798 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206342 4798 flags.go:64] FLAG: --kube-api-qps="50" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206350 4798 flags.go:64] FLAG: --kube-reserved="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206359 4798 flags.go:64] FLAG: --kube-reserved-cgroup="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206367 4798 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206377 4798 flags.go:64] FLAG: 
--kubelet-cgroups="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206385 4798 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206430 4798 flags.go:64] FLAG: --lock-file="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206440 4798 flags.go:64] FLAG: --log-cadvisor-usage="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206449 4798 flags.go:64] FLAG: --log-flush-frequency="5s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206458 4798 flags.go:64] FLAG: --log-json-info-buffer-size="0" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206472 4798 flags.go:64] FLAG: --log-json-split-stream="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206481 4798 flags.go:64] FLAG: --log-text-info-buffer-size="0" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206490 4798 flags.go:64] FLAG: --log-text-split-stream="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206499 4798 flags.go:64] FLAG: --logging-format="text" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206507 4798 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206517 4798 flags.go:64] FLAG: --make-iptables-util-chains="true" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206526 4798 flags.go:64] FLAG: --manifest-url="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206535 4798 flags.go:64] FLAG: --manifest-url-header="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206546 4798 flags.go:64] FLAG: --max-housekeeping-interval="15s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206555 4798 flags.go:64] FLAG: --max-open-files="1000000" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206566 4798 flags.go:64] FLAG: --max-pods="110" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206575 4798 flags.go:64] FLAG: --maximum-dead-containers="-1" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206584 4798 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206593 4798 flags.go:64] FLAG: --memory-manager-policy="None" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206602 4798 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206611 4798 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206621 4798 flags.go:64] FLAG: --node-ip="192.168.126.11" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206630 4798 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206649 4798 flags.go:64] FLAG: --node-status-max-images="50" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206658 4798 flags.go:64] FLAG: --node-status-update-frequency="10s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206669 4798 flags.go:64] FLAG: --oom-score-adj="-999" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206678 4798 flags.go:64] FLAG: --pod-cidr="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206687 4798 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Oct 11 03:55:05 crc 
kubenswrapper[4798]: I1011 03:55:05.206700 4798 flags.go:64] FLAG: --pod-manifest-path="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206709 4798 flags.go:64] FLAG: --pod-max-pids="-1" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206718 4798 flags.go:64] FLAG: --pods-per-core="0" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206727 4798 flags.go:64] FLAG: --port="10250" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206736 4798 flags.go:64] FLAG: --protect-kernel-defaults="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206747 4798 flags.go:64] FLAG: --provider-id="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206756 4798 flags.go:64] FLAG: --qos-reserved="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206766 4798 flags.go:64] FLAG: --read-only-port="10255" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206775 4798 flags.go:64] FLAG: --register-node="true" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206784 4798 flags.go:64] FLAG: --register-schedulable="true" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206793 4798 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206807 4798 flags.go:64] FLAG: --registry-burst="10" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206817 4798 flags.go:64] FLAG: --registry-qps="5" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206825 4798 flags.go:64] FLAG: --reserved-cpus="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206834 4798 flags.go:64] FLAG: --reserved-memory="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206845 4798 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206854 4798 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206862 4798 flags.go:64] FLAG: --rotate-certificates="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206872 4798 flags.go:64] FLAG: --rotate-server-certificates="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206880 4798 flags.go:64] FLAG: --runonce="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206889 4798 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206899 4798 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206909 4798 flags.go:64] FLAG: --seccomp-default="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206918 4798 flags.go:64] FLAG: --serialize-image-pulls="true" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206927 4798 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206936 4798 flags.go:64] FLAG: --storage-driver-db="cadvisor" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206946 4798 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206955 4798 flags.go:64] FLAG: --storage-driver-password="root" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206964 4798 flags.go:64] FLAG: --storage-driver-secure="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206973 4798 flags.go:64] FLAG: --storage-driver-table="stats" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206982 4798 flags.go:64] FLAG: 
--storage-driver-user="root" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.206991 4798 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207000 4798 flags.go:64] FLAG: --sync-frequency="1m0s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207010 4798 flags.go:64] FLAG: --system-cgroups="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207019 4798 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207032 4798 flags.go:64] FLAG: --system-reserved-cgroup="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207041 4798 flags.go:64] FLAG: --tls-cert-file="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207050 4798 flags.go:64] FLAG: --tls-cipher-suites="[]" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207060 4798 flags.go:64] FLAG: --tls-min-version="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207069 4798 flags.go:64] FLAG: --tls-private-key-file="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207078 4798 flags.go:64] FLAG: --topology-manager-policy="none" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207087 4798 flags.go:64] FLAG: --topology-manager-policy-options="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207095 4798 flags.go:64] FLAG: --topology-manager-scope="container" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207104 4798 flags.go:64] FLAG: --v="2" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207115 4798 flags.go:64] FLAG: --version="false" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207126 4798 flags.go:64] FLAG: --vmodule="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207136 4798 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.207146 4798 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.207340 4798 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.207351 4798 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.207360 4798 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.207369 4798 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.207380 4798 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.209950 4798 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.219548 4798 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.219588 4798 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.220751 4798 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.220757 4798 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.220764 4798 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.220769 4798 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.220778 4798 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.220972 4798 server.go:940] "Client rotation is on, will bootstrap in background"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.226670 4798 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.226807 4798 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.228594 4798 server.go:997] "Starting client certificate rotation"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.228619 4798 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.230050 4798 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-09 22:46:22.105551496 +0000 UTC
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.230185 4798 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 2178h51m16.875371097s for next certificate rotation
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.259573 4798 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.263252 4798 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.279487 4798 log.go:25] "Validated CRI v1 runtime API"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.310647 4798 log.go:25] "Validated CRI v1 image API"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.312457 4798 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.319612 4798 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-10-11-03-50-43-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.319653 4798 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.344312 4798 manager.go:217] Machine: {Timestamp:2025-10-11 03:55:05.339068087 +0000 UTC m=+0.675357793 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:914a85fd-a642-4471-b13e-f33764f494b0 BootID:319d317c-5958-4e72-8ec1-75f04665505c Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:fc:3e:1b Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:fc:3e:1b Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:91:1c:2e Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:31:38:ea Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:8c:a9:c1 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:ec:29:41 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:ba:fd:43 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:c2:d0:34:48:03:74 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:ce:4d:05:9d:b2:ae Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.344615 4798 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
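The certificate_manager.go:356 records above pair the client certificate's expiration (2026-02-24 05:52:08) with an earlier, jittered rotation deadline (2026-01-09) and then sleep until that deadline. A sketch of the arithmetic, assuming the commonly cited jitter window of 70-90% of the certificate's validity; the exact fraction and the notBefore value used here are assumptions, not taken from this log:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random point in the 70%-90% span of the
// certificate's lifetime, loosely mirroring client-go's certificate
// manager; the exact jitter policy is an implementation detail.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// Hypothetical issue time; the log only shows the expiration.
	notBefore := time.Date(2025, 10, 11, 3, 55, 0, 0, time.UTC)
	notAfter := time.Date(2026, 2, 24, 5, 52, 8, 0, time.UTC)
	d := rotationDeadline(notBefore, notAfter)
	// The "Waiting ... for next certificate rotation" line is just
	// the time remaining until the jittered deadline.
	fmt.Printf("rotation deadline %s, waiting %s\n", d, time.Until(d))
}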
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.344804 4798 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.346189 4798 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.346521 4798 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.346577 4798 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.346870 4798 topology_manager.go:138] "Creating topology manager with none policy"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.346891 4798 container_manager_linux.go:303] "Creating device plugin manager"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.347713 4798 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.347759 4798 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.347973 4798 state_mem.go:36] "Initialized new in-memory state store"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.348067 4798 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.352901 4798 kubelet.go:418] "Attempting to sync node with API server"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.352940 4798 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
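The nodeConfig dump above carries the hard eviction thresholds this kubelet will enforce: memory.available under 100Mi, nodefs.available under 10%, nodefs.inodesFree under 5%, imagefs.available under 15%, imagefs.inodesFree under 5%. Each signal compares observed availability against either an absolute quantity or a percentage of capacity (the Quantity/Percentage split in the JSON). A small sketch of that comparison; the observed numbers in main are made up:

package main

import "fmt"

// threshold holds either an absolute quantity in bytes or a fraction
// of capacity, mirroring the Quantity/Percentage split above.
type threshold struct {
	signal   string
	quantity int64   // bytes; zero when percent is used
	percent  float64 // fraction of capacity; zero when quantity is used
}

func exceeded(t threshold, available, capacity int64) bool {
	limit := t.quantity
	if t.percent > 0 {
		limit = int64(t.percent * float64(capacity))
	}
	return available < limit
}

func main() {
	hard := []threshold{
		{signal: "memory.available", quantity: 100 << 20}, // 100Mi
		{signal: "nodefs.available", percent: 0.10},
		{signal: "nodefs.inodesFree", percent: 0.05},
		{signal: "imagefs.available", percent: 0.15},
		{signal: "imagefs.inodesFree", percent: 0.05},
	}
	// Hypothetical observations: 80Mi of memory left, 8% of nodefs free.
	obs := map[string][2]int64{
		"memory.available": {80 << 20, 32 << 30},
		"nodefs.available": {8 << 30, 100 << 30},
	}
	for _, t := range hard {
		if o, ok := obs[t.signal]; ok && exceeded(t, o[0], o[1]) {
			fmt.Println("eviction signal firing:", t.signal)
		}
	}
}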
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.352981 4798 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.353001 4798 kubelet.go:324] "Adding apiserver pod source"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.353017 4798 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.359363 4798 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.359818 4798 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.144:6443: connect: connection refused
Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.359944 4798 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.144:6443: connect: connection refused" logger="UnhandledError"
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.359813 4798 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.144:6443: connect: connection refused
Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.360018 4798 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.144:6443: connect: connection refused" logger="UnhandledError"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.360667 4798 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
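The reflector.go:561 warnings above show the informer LIST calls failing with connection refused because the API server behind api-int.crc.testing:6443 is not up yet; the reflector logs the failure and retries rather than aborting kubelet startup. A simplified list-with-backoff loop in that shape, not client-go's actual reflector (the retry intervals and the failure injection are illustrative):

package main

import (
	"errors"
	"fmt"
	"time"
)

// listOnce stands in for the reflector's LIST call; here it fails the
// way the log does until the API server "comes up" on the 4th try.
func listOnce(attempt int) error {
	if attempt < 3 {
		return errors.New("dial tcp 38.102.83.144:6443: connect: connection refused")
	}
	return nil
}

func main() {
	backoff := 500 * time.Millisecond
	for attempt := 0; ; attempt++ {
		err := listOnce(attempt)
		if err == nil {
			fmt.Println("list succeeded, starting watch")
			return
		}
		// Log and retry with capped exponential backoff instead of
		// treating an unreachable apiserver as fatal.
		fmt.Printf("W failed to list: %v; retrying in %s\n", err, backoff)
		time.Sleep(backoff)
		if backoff < 30*time.Second {
			backoff *= 2
		}
	}
}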
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.362493 4798 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.364014 4798 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.364039 4798 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.364046 4798 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.364053 4798 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.364064 4798 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.364071 4798 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.364078 4798 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.364088 4798 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.364098 4798 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.364105 4798 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.364114 4798 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.364121 4798 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.365163 4798 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.365611 4798 server.go:1280] "Started kubelet"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.366728 4798 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.366952 4798 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Oct 11 03:55:05 crc systemd[1]: Started Kubernetes Kubelet.
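The ratelimit.go:55 record above caps the podresources endpoint at qps=100 with 10 burst tokens, which is exactly a token-bucket limiter. A sketch with those numbers using golang.org/x/time/rate (the surrounding harness is illustrative, not kubelet code):

package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// qps=100, burstTokens=10, matching the values in the log: the
	// bucket starts with 10 tokens and refills at 100 per second.
	lim := rate.NewLimiter(rate.Limit(100), 10)
	ctx := context.Background()
	for i := 0; i < 20; i++ {
		// Wait blocks until a token is available, so callers beyond
		// the initial burst of 10 are smoothed to 100 requests/s.
		if err := lim.Wait(ctx); err != nil {
			fmt.Println("rate limit wait:", err)
			return
		}
	}
	fmt.Println("served 20 requests under the limiter")
}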
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.367555 4798 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.144:6443: connect: connection refused
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.367326 4798 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.369831 4798 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.369863 4798 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.370239 4798 volume_manager.go:287] "The desired_state_of_world populator starts"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.370258 4798 volume_manager.go:289] "Starting Kubelet Volume Manager"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.370840 4798 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.370237 4798 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.370803 4798 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 10:08:25.083162705 +0000 UTC
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.370879 4798 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1854h13m19.712286369s for next certificate rotation
Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.371596 4798 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.144:6443: connect: connection refused" interval="200ms"
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.371635 4798 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.371838 4798 factory.go:55] Registering systemd factory
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.371850 4798 factory.go:221] Registration of the systemd container factory successfully
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.372140 4798 factory.go:153] Registering CRI-O factory
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.372157 4798 factory.go:221] Registration of the crio container factory successfully
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.372177 4798 factory.go:103] Registering Raw factory
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.372193 4798 manager.go:1196] Started watching for new ooms in manager
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.378712 4798 manager.go:319] Starting recovery of all containers
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.372873 4798 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp
38.102.83.144:6443: connect: connection refused Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.380915 4798 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.144:6443: connect: connection refused" logger="UnhandledError" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.382219 4798 server.go:460] "Adding debug handlers to kubelet server" Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.382991 4798 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.144:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.186d538f8bfa1a48 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-10-11 03:55:05.365584456 +0000 UTC m=+0.701874142,LastTimestamp:2025-10-11 03:55:05.365584456 +0000 UTC m=+0.701874142,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390428 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390483 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390497 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390510 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390521 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390532 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390544 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390557 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390571 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390583 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390595 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390608 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390623 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390637 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390653 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390666 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390678 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390690 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" 
volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390701 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390717 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390732 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390744 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390757 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390772 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390784 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390796 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390813 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390827 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390840 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390852 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390891 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390909 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390921 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390934 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390946 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390963 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390975 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.390990 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391004 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391017 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" 
volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391028 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391041 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391056 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391068 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391082 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391097 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391109 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391155 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391169 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391180 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391192 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391203 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391221 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391234 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391247 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391258 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391270 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391282 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391292 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391324 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391337 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391348 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391360 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391374 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391385 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391420 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391433 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391444 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391460 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391472 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391483 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391493 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391504 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391516 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391529 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391545 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391557 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391568 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391578 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391593 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391605 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391617 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391628 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391638 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391649 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391710 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391724 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391736 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391748 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391759 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391770 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391782 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391793 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391804 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391815 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" 
volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391828 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391841 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391855 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391866 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391877 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391891 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391905 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391917 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391928 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391946 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391961 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391974 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391986 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.391998 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392012 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392025 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392039 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392053 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392065 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392079 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392091 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392103 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" 
volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392117 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392131 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392144 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392158 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392171 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392184 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392196 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392208 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392220 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392233 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392247 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" 
volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392259 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392273 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392285 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392298 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392313 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392325 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392337 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392349 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392362 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392373 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392385 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" 
volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392417 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392430 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392443 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392457 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392470 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392482 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392494 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.392507 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394413 4798 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394443 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394458 4798 reconstruct.go:130] "Volume is marked as uncertain and 
added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394474 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394486 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394500 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394511 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394525 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394537 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394551 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394565 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394577 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394591 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394604 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394618 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394629 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394641 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394653 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394666 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394680 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394692 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394703 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394718 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394729 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394741 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394753 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394765 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394776 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394787 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394795 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394804 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394812 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394822 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394830 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394839 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394851 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394862 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394874 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394899 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394911 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394922 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394933 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394946 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394957 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394971 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394982 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.394995 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395008 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395021 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395033 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395046 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395057 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395069 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395080 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395091 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395105 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395159 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395173 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395187 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395200 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395214 4798 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395225 4798 reconstruct.go:97] "Volume reconstruction finished" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.395233 4798 reconciler.go:26] "Reconciler: start to sync state" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.397861 4798 manager.go:324] Recovery completed Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.409837 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.411181 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.411234 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.411250 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.412892 4798 cpu_manager.go:225] "Starting CPU manager" policy="none" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.412911 4798 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.412934 4798 state_mem.go:36] "Initialized new in-memory state store" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.419779 4798 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.422234 4798 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.422274 4798 status_manager.go:217] "Starting to sync pod status with apiserver" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.422294 4798 kubelet.go:2335] "Starting kubelet main sync loop" Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.422337 4798 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.424049 4798 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.144:6443: connect: connection refused Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.424142 4798 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.144:6443: connect: connection refused" logger="UnhandledError" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.434080 4798 policy_none.go:49] "None policy: Start" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.434713 4798 memory_manager.go:170] "Starting memorymanager" policy="None" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.434790 4798 state_mem.go:35] "Initializing new in-memory state store" Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.471867 4798 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.478326 4798 manager.go:334] "Starting Device Plugin manager" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.478690 4798 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.478770 4798 server.go:79] "Starting device plugin registration server" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.479261 4798 eviction_manager.go:189] "Eviction manager: starting control loop" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.479280 4798 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.479921 4798 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.480037 4798 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.480050 4798 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.495205 4798 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.522918 4798 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Oct 11 03:55:05 crc kubenswrapper[4798]: 
I1011 03:55:05.523045 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.525884 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.525921 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.525931 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.526072 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.526412 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.526476 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.526959 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.526983 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.526991 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.527055 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.527171 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.527219 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.527576 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.527619 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.527638 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.527893 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.527934 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.527948 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.528105 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.528203 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.528239 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.529379 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.529438 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.529451 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.529749 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.529763 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.529771 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.529793 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.529811 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.529821 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.529849 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.530001 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.530029 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.530378 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.530429 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.530442 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.530621 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.530651 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.530704 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.530744 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.530762 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.531361 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.531381 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.531448 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.572110 4798 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.144:6443: connect: connection refused" interval="400ms" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.579990 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.581150 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.581192 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.581215 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.581242 4798 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.581752 4798 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.144:6443: connect: connection refused" node="crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597090 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597146 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597176 
4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597203 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597227 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597279 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597350 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597423 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597462 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597492 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597523 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597555 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597586 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597631 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.597673 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698249 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698303 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698324 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698341 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698357 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698374 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 
03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698422 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698438 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698428 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698468 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698505 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698499 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698455 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698661 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698680 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698565 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698711 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698580 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698581 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698696 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698554 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698565 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698754 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698762 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698786 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698774 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" 
(UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698770 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698791 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698827 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.698815 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.782258 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.786725 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.786779 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.786792 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.786821 4798 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.787334 4798 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.144:6443: connect: connection refused" node="crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.856583 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.863938 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.884976 4798 util.go:30] "No sandbox for pod can be found. 
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.900893 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-2d981b6e13eea45a603e6b7fc0e64df1d5fff3afda5d443cccd135f95e27e761 WatchSource:0}: Error finding container 2d981b6e13eea45a603e6b7fc0e64df1d5fff3afda5d443cccd135f95e27e761: Status 404 returned error can't find the container with id 2d981b6e13eea45a603e6b7fc0e64df1d5fff3afda5d443cccd135f95e27e761
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.902145 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-e7284185790282611634221e4dde885b2978d676e50e5d0bbf4d721ad9fff768 WatchSource:0}: Error finding container e7284185790282611634221e4dde885b2978d676e50e5d0bbf4d721ad9fff768: Status 404 returned error can't find the container with id e7284185790282611634221e4dde885b2978d676e50e5d0bbf4d721ad9fff768
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.903281 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.906074 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-cfb5b800f00bbc9082fa3c20ca6dfce203f68f6317f1dbe33307caccee419467 WatchSource:0}: Error finding container cfb5b800f00bbc9082fa3c20ca6dfce203f68f6317f1dbe33307caccee419467: Status 404 returned error can't find the container with id cfb5b800f00bbc9082fa3c20ca6dfce203f68f6317f1dbe33307caccee419467
Oct 11 03:55:05 crc kubenswrapper[4798]: I1011 03:55:05.908624 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.912850 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-9f6413c7498f8506e2427a54aca6d7176329dee0163707cd3c1ad7bf2354c639 WatchSource:0}: Error finding container 9f6413c7498f8506e2427a54aca6d7176329dee0163707cd3c1ad7bf2354c639: Status 404 returned error can't find the container with id 9f6413c7498f8506e2427a54aca6d7176329dee0163707cd3c1ad7bf2354c639
Oct 11 03:55:05 crc kubenswrapper[4798]: W1011 03:55:05.929005 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-76fab41b2a7849821edf6fe67b91a3032c91d37cf72580f936fc03c8f6c9d5e8 WatchSource:0}: Error finding container 76fab41b2a7849821edf6fe67b91a3032c91d37cf72580f936fc03c8f6c9d5e8: Status 404 returned error can't find the container with id 76fab41b2a7849821edf6fe67b91a3032c91d37cf72580f936fc03c8f6c9d5e8
Oct 11 03:55:05 crc kubenswrapper[4798]: E1011 03:55:05.973591 4798 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.144:6443: connect: connection refused" interval="800ms"
Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.188095 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.189831 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.189879 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.189892 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.189924 4798 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Oct 11 03:55:06 crc kubenswrapper[4798]: E1011 03:55:06.190427 4798 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.144:6443: connect: connection refused" node="crc"
Oct 11 03:55:06 crc kubenswrapper[4798]: W1011 03:55:06.193117 4798 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.144:6443: connect: connection refused
Oct 11 03:55:06 crc kubenswrapper[4798]: E1011 03:55:06.193213 4798 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.144:6443: connect: connection refused" logger="UnhandledError"
Oct 11 03:55:06 crc kubenswrapper[4798]: W1011 03:55:06.275201 4798 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.144:6443: connect: connection refused
"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.144:6443: connect: connection refused Oct 11 03:55:06 crc kubenswrapper[4798]: E1011 03:55:06.275330 4798 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.144:6443: connect: connection refused" logger="UnhandledError" Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.368718 4798 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.144:6443: connect: connection refused Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.425852 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2d981b6e13eea45a603e6b7fc0e64df1d5fff3afda5d443cccd135f95e27e761"} Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.426787 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"76fab41b2a7849821edf6fe67b91a3032c91d37cf72580f936fc03c8f6c9d5e8"} Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.427504 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"9f6413c7498f8506e2427a54aca6d7176329dee0163707cd3c1ad7bf2354c639"} Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.428223 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"cfb5b800f00bbc9082fa3c20ca6dfce203f68f6317f1dbe33307caccee419467"} Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.428946 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e7284185790282611634221e4dde885b2978d676e50e5d0bbf4d721ad9fff768"} Oct 11 03:55:06 crc kubenswrapper[4798]: W1011 03:55:06.627492 4798 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.144:6443: connect: connection refused Oct 11 03:55:06 crc kubenswrapper[4798]: E1011 03:55:06.627587 4798 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.144:6443: connect: connection refused" logger="UnhandledError" Oct 11 03:55:06 crc kubenswrapper[4798]: E1011 03:55:06.774959 4798 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.144:6443: connect: connection refused" interval="1.6s" Oct 11 03:55:06 crc 
Oct 11 03:55:06 crc kubenswrapper[4798]: E1011 03:55:06.782549 4798 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.144:6443: connect: connection refused" logger="UnhandledError"
Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.990659 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.991969 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.992078 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.992095 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:06 crc kubenswrapper[4798]: I1011 03:55:06.992136 4798 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Oct 11 03:55:06 crc kubenswrapper[4798]: E1011 03:55:06.993035 4798 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.144:6443: connect: connection refused" node="crc"
Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.368756 4798 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.144:6443: connect: connection refused
Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.437439 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3"}
Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.437501 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.437288 4798 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3" exitCode=0
Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.438868 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.438907 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.438925 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.441993 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767"}
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767"} Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.442024 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f"} Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.442036 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873"} Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.442046 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142"} Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.442061 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.443461 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.443502 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.443521 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.445167 4798 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="889807d3a3e4bc895910c76d1d71dfc36ac6716878ee162c0d9aad3827a01377" exitCode=0 Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.445312 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.445326 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"889807d3a3e4bc895910c76d1d71dfc36ac6716878ee162c0d9aad3827a01377"} Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.447292 4798 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26" exitCode=0 Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.447436 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.447439 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26"} Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.447608 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.447663 4798 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.447681 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.448355 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.448382 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.448406 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.453803 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.454889 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.454942 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.454966 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.454975 4798 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="a697e8d1614e94c59ae1935733518a6de4259ea7eddbbe686b933d3f453b0f8d" exitCode=0 Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.455027 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"a697e8d1614e94c59ae1935733518a6de4259ea7eddbbe686b933d3f453b0f8d"} Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.455077 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.456562 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.456615 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:07 crc kubenswrapper[4798]: I1011 03:55:07.456639 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:08 crc kubenswrapper[4798]: W1011 03:55:08.048798 4798 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.144:6443: connect: connection refused Oct 11 03:55:08 crc kubenswrapper[4798]: E1011 03:55:08.048941 4798 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.144:6443: connect: connection refused" logger="UnhandledError" Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.368299 4798 csi_plugin.go:884] Failed to contact 
Oct 11 03:55:08 crc kubenswrapper[4798]: E1011 03:55:08.375253 4798 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.144:6443: connect: connection refused" interval="3.2s"
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.460539 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22"}
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.460585 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae"}
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.460598 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7"}
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.460611 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921"}
Oct 11 03:55:08 crc kubenswrapper[4798]: W1011 03:55:08.461188 4798 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.144:6443: connect: connection refused
Oct 11 03:55:08 crc kubenswrapper[4798]: E1011 03:55:08.461324 4798 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.144:6443: connect: connection refused" logger="UnhandledError"
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.462626 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"1baa4c767a0681f8f483382dbcb45c01a8d63b7e9065744e1be1267cf3d4dc36"}
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.462674 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.463656 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.463687 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.463699 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:08 crc kubenswrapper[4798]: W1011 03:55:08.464175 4798 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.144:6443: connect: connection refused
Oct 11 03:55:08 crc kubenswrapper[4798]: E1011 03:55:08.464233 4798 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.144:6443: connect: connection refused" logger="UnhandledError"
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.467411 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b2fb58514dfb39402d4e10ca699975671a3c53a7fe54b374a987efa8ba401ccf"}
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.467446 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"1b88dcbb90798e97c13f7e7ef7bc7171bb1dc33cf488bd453a8d263a02093dfd"}
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.467460 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"6897a04b7204031c861d74f6a4d475af9e9b88220cf02bfa50e7845c85cd5f70"}
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.467492 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.468486 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.468520 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.468537 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.470106 4798 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="cc4218c8c72ac3ee30192f5d64284678a7cf532624ccf43730d140385af91a48" exitCode=0
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.470153 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"cc4218c8c72ac3ee30192f5d64284678a7cf532624ccf43730d140385af91a48"}
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.470214 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.470281 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.471361 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.471377 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.471416 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.471430 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.471432 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.471447 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.594196 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.596115 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.596176 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.596186 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:08 crc kubenswrapper[4798]: I1011 03:55:08.596215 4798 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 11 03:55:08 crc kubenswrapper[4798]: E1011 03:55:08.596859 4798 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.144:6443: connect: connection refused" node="crc" Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.476751 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a"} Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.476814 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.478942 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.479053 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.479126 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.479182 4798 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="af2282a68f8ef3673cddb76e5801be2a142e2dc27fdf5c908e49318cfb13cb9e" exitCode=0 Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.479249 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"af2282a68f8ef3673cddb76e5801be2a142e2dc27fdf5c908e49318cfb13cb9e"} Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.479329 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:09 crc 
Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.479603 4798 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.479747 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.480795 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.480815 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.480827 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.480837 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.480867 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.480879 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.480799 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.480927 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:09 crc kubenswrapper[4798]: I1011 03:55:09.480936 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.492862 4798 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.494091 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.493129 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.493168 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9c9f44877c10439bb8e546ce041f32308c83f1a547140ec66993c26ad4ee306b"}
Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.494371 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ea9c51e79aa7e8f9ddfb800fb9d0cf94fb5d551210acb8e5f836ba30b7b47539"}
Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.494435 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a0f1fc9aeb16230733964ca136c4fb9da847f42355e2087c563594c7420a3eb4"}
Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.494458 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"73da1fe1784a04b0babc804ccb6862856c863b9f793d96535c9ff59c5a0430f9"}
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"73da1fe1784a04b0babc804ccb6862856c863b9f793d96535c9ff59c5a0430f9"} Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.494474 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"5102a1352bd10f0c1e73a50d61360a7e0d6ed3a3343e19b26142c3aa692d2e5f"} Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.497040 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.497085 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.497097 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.497441 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.497503 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.497523 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.910795 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.911075 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.913366 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.913428 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.913441 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:10 crc kubenswrapper[4798]: I1011 03:55:10.920754 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.495961 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.496301 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.497044 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.497092 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.497108 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.498323 4798 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.498408 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.498429 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.529555 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.797868 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.800289 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.800364 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.800420 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:11 crc kubenswrapper[4798]: I1011 03:55:11.800461 4798 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 11 03:55:12 crc kubenswrapper[4798]: I1011 03:55:12.499901 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:12 crc kubenswrapper[4798]: I1011 03:55:12.501714 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:12 crc kubenswrapper[4798]: I1011 03:55:12.501781 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:12 crc kubenswrapper[4798]: I1011 03:55:12.501800 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:12 crc kubenswrapper[4798]: I1011 03:55:12.521649 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.503186 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.504867 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.504926 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.504942 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.684914 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.685326 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.687836 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.687915 4798 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.687939 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.858317 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.858697 4798 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.858773 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.860697 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.860755 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.860768 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:13 crc kubenswrapper[4798]: I1011 03:55:13.957832 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.045534 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.045699 4798 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.045750 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.047380 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.047453 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.047471 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.164186 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.505655 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.505848 4798 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.506605 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.506792 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.506813 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.506825 4798 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.508082 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.508096 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:14 crc kubenswrapper[4798]: I1011 03:55:14.508105 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:15 crc kubenswrapper[4798]: I1011 03:55:15.212761 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:15 crc kubenswrapper[4798]: I1011 03:55:15.398948 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:15 crc kubenswrapper[4798]: E1011 03:55:15.496271 4798 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Oct 11 03:55:15 crc kubenswrapper[4798]: I1011 03:55:15.508900 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:15 crc kubenswrapper[4798]: I1011 03:55:15.508902 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:15 crc kubenswrapper[4798]: I1011 03:55:15.510305 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:15 crc kubenswrapper[4798]: I1011 03:55:15.510373 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:15 crc kubenswrapper[4798]: I1011 03:55:15.510479 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:15 crc kubenswrapper[4798]: I1011 03:55:15.510647 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:15 crc kubenswrapper[4798]: I1011 03:55:15.510680 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:15 crc kubenswrapper[4798]: I1011 03:55:15.510692 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:18 crc kubenswrapper[4798]: I1011 03:55:18.213480 4798 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 11 03:55:18 crc kubenswrapper[4798]: I1011 03:55:18.213603 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 11 03:55:19 crc kubenswrapper[4798]: I1011 03:55:19.369318 4798 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Oct 11 03:55:19 crc kubenswrapper[4798]: W1011 03:55:19.668678 4798 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout Oct 11 03:55:19 crc kubenswrapper[4798]: I1011 03:55:19.668767 4798 trace.go:236] Trace[278962670]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Oct-2025 03:55:09.667) (total time: 10001ms): Oct 11 03:55:19 crc kubenswrapper[4798]: Trace[278962670]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (03:55:19.668) Oct 11 03:55:19 crc kubenswrapper[4798]: Trace[278962670]: [10.001652919s] [10.001652919s] END Oct 11 03:55:19 crc kubenswrapper[4798]: E1011 03:55:19.668787 4798 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Oct 11 03:55:20 crc kubenswrapper[4798]: I1011 03:55:20.073528 4798 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Oct 11 03:55:20 crc kubenswrapper[4798]: I1011 03:55:20.073605 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Oct 11 03:55:20 crc kubenswrapper[4798]: I1011 03:55:20.083262 4798 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\": RBAC: [clusterrole.rbac.authorization.k8s.io \"system:public-info-viewer\" not found, clusterrole.rbac.authorization.k8s.io \"system:openshift:public-info-viewer\" not found]","reason":"Forbidden","details":{},"code":403} Oct 11 03:55:20 crc kubenswrapper[4798]: I1011 03:55:20.083323 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Oct 11 03:55:20 crc kubenswrapper[4798]: I1011 03:55:20.526566 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Oct 11 03:55:20 crc kubenswrapper[4798]: I1011 03:55:20.528207 4798 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" 
containerID="805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a" exitCode=255 Oct 11 03:55:20 crc kubenswrapper[4798]: I1011 03:55:20.528252 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a"} Oct 11 03:55:20 crc kubenswrapper[4798]: I1011 03:55:20.528410 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:20 crc kubenswrapper[4798]: I1011 03:55:20.529199 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:20 crc kubenswrapper[4798]: I1011 03:55:20.529235 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:20 crc kubenswrapper[4798]: I1011 03:55:20.529247 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:20 crc kubenswrapper[4798]: I1011 03:55:20.529754 4798 scope.go:117] "RemoveContainer" containerID="805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a" Oct 11 03:55:21 crc kubenswrapper[4798]: I1011 03:55:21.535484 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Oct 11 03:55:21 crc kubenswrapper[4798]: I1011 03:55:21.537700 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d"} Oct 11 03:55:21 crc kubenswrapper[4798]: I1011 03:55:21.537880 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:21 crc kubenswrapper[4798]: I1011 03:55:21.538851 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:21 crc kubenswrapper[4798]: I1011 03:55:21.538891 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:21 crc kubenswrapper[4798]: I1011 03:55:21.538902 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:22 crc kubenswrapper[4798]: I1011 03:55:22.554734 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Oct 11 03:55:22 crc kubenswrapper[4798]: I1011 03:55:22.554924 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:22 crc kubenswrapper[4798]: I1011 03:55:22.556266 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:22 crc kubenswrapper[4798]: I1011 03:55:22.556330 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:22 crc kubenswrapper[4798]: I1011 03:55:22.556350 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:22 crc kubenswrapper[4798]: I1011 03:55:22.568437 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Oct 11 03:55:23 crc 
kubenswrapper[4798]: I1011 03:55:23.542188 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:23 crc kubenswrapper[4798]: I1011 03:55:23.543113 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:23 crc kubenswrapper[4798]: I1011 03:55:23.543167 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:23 crc kubenswrapper[4798]: I1011 03:55:23.543181 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:23 crc kubenswrapper[4798]: I1011 03:55:23.865501 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:23 crc kubenswrapper[4798]: I1011 03:55:23.865651 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:23 crc kubenswrapper[4798]: I1011 03:55:23.865755 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:23 crc kubenswrapper[4798]: I1011 03:55:23.866898 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:23 crc kubenswrapper[4798]: I1011 03:55:23.866958 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:23 crc kubenswrapper[4798]: I1011 03:55:23.866974 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:23 crc kubenswrapper[4798]: I1011 03:55:23.872121 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:23 crc kubenswrapper[4798]: I1011 03:55:23.884806 4798 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Oct 11 03:55:24 crc kubenswrapper[4798]: I1011 03:55:24.169782 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:24 crc kubenswrapper[4798]: I1011 03:55:24.170027 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:24 crc kubenswrapper[4798]: I1011 03:55:24.171806 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:24 crc kubenswrapper[4798]: I1011 03:55:24.171862 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:24 crc kubenswrapper[4798]: I1011 03:55:24.171878 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:24 crc kubenswrapper[4798]: I1011 03:55:24.544583 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:24 crc kubenswrapper[4798]: I1011 03:55:24.545407 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:24 crc kubenswrapper[4798]: I1011 03:55:24.545441 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:24 crc kubenswrapper[4798]: I1011 03:55:24.545453 4798 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.077190 4798 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.078827 4798 trace.go:236] Trace[273350884]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Oct-2025 03:55:12.427) (total time: 12651ms):
Oct 11 03:55:25 crc kubenswrapper[4798]: Trace[273350884]: ---"Objects listed" error: 12651ms (03:55:25.078)
Oct 11 03:55:25 crc kubenswrapper[4798]: Trace[273350884]: [12.651639925s] [12.651639925s] END
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.078851 4798 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.081060 4798 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.082922 4798 trace.go:236] Trace[504715781]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Oct-2025 03:55:11.697) (total time: 13385ms):
Oct 11 03:55:25 crc kubenswrapper[4798]: Trace[504715781]: ---"Objects listed" error: 13385ms (03:55:25.082)
Oct 11 03:55:25 crc kubenswrapper[4798]: Trace[504715781]: [13.385773273s] [13.385773273s] END
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.082949 4798 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.084267 4798 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.086922 4798 trace.go:236] Trace[1542093925]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Oct-2025 03:55:11.502) (total time: 13584ms):
Oct 11 03:55:25 crc kubenswrapper[4798]: Trace[1542093925]: ---"Objects listed" error: 13583ms (03:55:25.085)
Oct 11 03:55:25 crc kubenswrapper[4798]: Trace[1542093925]: [13.584376196s] [13.584376196s] END
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.086948 4798 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.366898 4798 apiserver.go:52] "Watching apiserver"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.369898 4798 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.370246 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-dns/node-resolver-kszgd","openshift-machine-config-operator/machine-config-daemon-h28s2","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c"]
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.370716 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.370762 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.370790 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.370800 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.370767 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.370834 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.370997 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-kszgd"
Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.370986 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.371073 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.371110 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.371296 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-h28s2"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.373733 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.373906 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.374075 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.374432 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.374581 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.374783 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.374879 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.375325 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.375361 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.375466 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.375581 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.375760 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.376050 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.376072 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.376078 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.376211 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.376478 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386197 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/42571bc8-2186-4e3b-bba9-28f5a8f364d0-proxy-tls\") pod \"machine-config-daemon-h28s2\" (UID:
\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\") " pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386290 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5gdw\" (UniqueName: \"kubernetes.io/projected/42571bc8-2186-4e3b-bba9-28f5a8f364d0-kube-api-access-q5gdw\") pod \"machine-config-daemon-h28s2\" (UID: \"42571bc8-2186-4e3b-bba9-28f5a8f364d0\") " pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386329 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/42571bc8-2186-4e3b-bba9-28f5a8f364d0-rootfs\") pod \"machine-config-daemon-h28s2\" (UID: \"42571bc8-2186-4e3b-bba9-28f5a8f364d0\") " pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386370 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386442 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386484 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386516 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386589 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386625 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 03:55:25 crc 
kubenswrapper[4798]: I1011 03:55:25.386661 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386713 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386749 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/e999fc8d-dd8d-415f-b72b-1c6189cf18a9-hosts-file\") pod \"node-resolver-kszgd\" (UID: \"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\") " pod="openshift-dns/node-resolver-kszgd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386793 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386830 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.386854 4798 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.386932 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:25.886910815 +0000 UTC m=+21.223200501 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.386866 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6wm8\" (UniqueName: \"kubernetes.io/projected/e999fc8d-dd8d-415f-b72b-1c6189cf18a9-kube-api-access-h6wm8\") pod \"node-resolver-kszgd\" (UID: \"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\") " pod="openshift-dns/node-resolver-kszgd" Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.386977 4798 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.387005 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:25.886998668 +0000 UTC m=+21.223288354 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.387010 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.387053 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/42571bc8-2186-4e3b-bba9-28f5a8f364d0-mcd-auth-proxy-config\") pod \"machine-config-daemon-h28s2\" (UID: \"42571bc8-2186-4e3b-bba9-28f5a8f364d0\") " pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.387085 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.387113 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.387154 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.387515 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.388354 4798 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.388530 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.390061 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.395512 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.399675 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.403537 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.403651 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.403717 4798 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.403856 4798 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:25.903834914 +0000 UTC m=+21.240124600 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.406811 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.407461 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.408215 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.408617 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.408649 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.408665 4798 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.408727 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:25.908706518 +0000 UTC m=+21.244996414 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.415057 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.423516 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.435914 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.438711 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: W1011 03:55:25.450065 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-f30a78913d6c7e7c19d8b989c20b7402901b9a4daf385278c7e922a7831dd1f9 WatchSource:0}: Error finding container f30a78913d6c7e7c19d8b989c20b7402901b9a4daf385278c7e922a7831dd1f9: Status 404 returned error can't find the container with id f30a78913d6c7e7c19d8b989c20b7402901b9a4daf385278c7e922a7831dd1f9 Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.455793 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.471577 4798 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.473662 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.486273 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.488988 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489037 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489067 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489093 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489122 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489175 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489200 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489234 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489263 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489292 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489316 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489342 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489368 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489410 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489473 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489498 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011
03:55:25.489524 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489552 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489573 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489579 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489630 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489656 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489677 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489696 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489716 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489735 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489791 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489810 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489828 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489846 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489864 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489882 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489902 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489919 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489936 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489953 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489969 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.489986 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.490004 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.490021 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.490038 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.490054 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.490071 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.490074 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7".
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.490076 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.490500 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.490834 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.490863 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491134 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491233 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491189 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491264 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491336 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.490086 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491625 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491656 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491680 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491699 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491733 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491753 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491777 4798 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491795 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491815 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491835 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491861 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491888 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.491915 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492115 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492142 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492167 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Oct 11 03:55:25 crc kubenswrapper[4798]: 
I1011 03:55:25.492169 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492191 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492223 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492252 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492278 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492307 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492330 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492356 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492475 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492566 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492595 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492620 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492641 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492663 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492683 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492706 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493145 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493184 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493218 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493246 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493276 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493307 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493334 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493363 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493412 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493442 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493484 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493511 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493550 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493584 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.496279 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.497681 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.497718 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.497747 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.497773 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.497797 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.497821 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498426 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.497848 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498707 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498739 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498765 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498795 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498826 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: 
\"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498851 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498874 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498897 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498919 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498942 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498965 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498989 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499212 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499248 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499279 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499311 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499337 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499361 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499387 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499428 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499458 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499484 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499508 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499532 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499556 4798 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499580 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499609 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499671 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499702 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499725 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499749 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499773 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499795 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499818 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: 
\"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499840 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499862 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499885 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499907 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499927 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499951 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500097 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500120 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500141 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500164 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod 
\"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500189 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500213 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500234 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492477 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.492932 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493063 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493150 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493368 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493597 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493674 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493681 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.493697 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.494084 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.494378 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.494414 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.494677 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.494752 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.495353 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.495712 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.495513 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.496005 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.496168 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.496853 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). 
InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.496874 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.496921 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.497572 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.497765 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.497836 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.497848 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.497911 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498277 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498313 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.498630 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499330 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499488 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499668 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.499763 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500159 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.500260 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:55:26.00023678 +0000 UTC m=+21.336526556 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500804 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500825 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500862 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500898 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500929 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500956 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500976 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500982 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501058 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501115 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501153 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501215 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501251 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501314 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501367 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501426 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501461 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: 
\"44663579-783b-4372-86d6-acf235a62d72\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501515 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501546 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501766 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501805 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501865 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501921 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501955 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502005 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502044 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502100 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" 
(UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502129 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502221 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502280 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502316 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502377 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502681 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502738 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502766 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502830 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502859 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: 
\"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502916 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502949 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503507 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503533 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503577 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503602 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503620 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503661 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503685 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503708 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: 
\"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503762 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503789 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503842 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503870 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503924 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503955 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504019 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504125 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/42571bc8-2186-4e3b-bba9-28f5a8f364d0-mcd-auth-proxy-config\") pod \"machine-config-daemon-h28s2\" (UID: \"42571bc8-2186-4e3b-bba9-28f5a8f364d0\") " pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504270 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/42571bc8-2186-4e3b-bba9-28f5a8f364d0-proxy-tls\") pod \"machine-config-daemon-h28s2\" (UID: \"42571bc8-2186-4e3b-bba9-28f5a8f364d0\") " pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504303 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5gdw\" (UniqueName: 
\"kubernetes.io/projected/42571bc8-2186-4e3b-bba9-28f5a8f364d0-kube-api-access-q5gdw\") pod \"machine-config-daemon-h28s2\" (UID: \"42571bc8-2186-4e3b-bba9-28f5a8f364d0\") " pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504333 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/42571bc8-2186-4e3b-bba9-28f5a8f364d0-rootfs\") pod \"machine-config-daemon-h28s2\" (UID: \"42571bc8-2186-4e3b-bba9-28f5a8f364d0\") " pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504375 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504533 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504577 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/e999fc8d-dd8d-415f-b72b-1c6189cf18a9-hosts-file\") pod \"node-resolver-kszgd\" (UID: \"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\") " pod="openshift-dns/node-resolver-kszgd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504672 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6wm8\" (UniqueName: \"kubernetes.io/projected/e999fc8d-dd8d-415f-b72b-1c6189cf18a9-kube-api-access-h6wm8\") pod \"node-resolver-kszgd\" (UID: \"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\") " pod="openshift-dns/node-resolver-kszgd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504784 4798 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504812 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504831 4798 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504849 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504866 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504880 4798 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504895 4798 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504909 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504927 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.505156 4798 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.505184 4798 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501561 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.507460 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.501827 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502122 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502599 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502692 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.502903 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503252 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503355 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503984 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.503993 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504437 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504468 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504550 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.500640 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504982 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.504966 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.505063 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.505318 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.506326 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). 
InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.506468 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.506692 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.506836 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.506867 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.506909 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.506904 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.507866 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.507995 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.508243 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.508321 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.508343 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.506960 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.507201 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.507850 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.506928 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.508572 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.508630 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.508922 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.508979 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.509838 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.510869 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.511130 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.511375 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.511491 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.511524 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.511725 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.511841 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.511951 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.512152 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.512225 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.512349 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.512341 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.512322 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.512561 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.512945 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.512964 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.513078 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.513094 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.513159 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.513528 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.514095 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.515195 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/42571bc8-2186-4e3b-bba9-28f5a8f364d0-rootfs\") pod \"machine-config-daemon-h28s2\" (UID: \"42571bc8-2186-4e3b-bba9-28f5a8f364d0\") " pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.515345 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.518559 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.518651 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.518745 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/e999fc8d-dd8d-415f-b72b-1c6189cf18a9-hosts-file\") pod \"node-resolver-kszgd\" (UID: \"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\") " pod="openshift-dns/node-resolver-kszgd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.518925 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519319 4798 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519277 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519343 4798 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519368 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519386 4798 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519418 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519435 4798 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519449 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519465 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519479 4798 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519494 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519509 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519522 4798 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519536 4798 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519551 4798 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519567 4798 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519583 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519597 4798 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519612 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519628 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519642 4798 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519655 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519667 4798 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519681 4798 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519696 4798 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519710 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.519709 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.520040 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.520317 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/42571bc8-2186-4e3b-bba9-28f5a8f364d0-mcd-auth-proxy-config\") pod \"machine-config-daemon-h28s2\" (UID: \"42571bc8-2186-4e3b-bba9-28f5a8f364d0\") " pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.520652 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.521197 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.521284 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.522002 4798 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.522022 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.522034 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.522045 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.521286 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.521322 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.521592 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.521690 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.521879 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.521995 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.522658 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.522904 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.523070 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.523333 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.523332 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.523855 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.523941 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). 
InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.523993 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.524358 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.524454 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.524763 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.524761 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.524796 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.524547 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.525181 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.525352 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.506881 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.525797 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.525907 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.526068 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.526491 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.526553 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.527112 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.527173 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.527307 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.527773 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.527992 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.528268 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.529025 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5gdw\" (UniqueName: \"kubernetes.io/projected/42571bc8-2186-4e3b-bba9-28f5a8f364d0-kube-api-access-q5gdw\") pod \"machine-config-daemon-h28s2\" (UID: \"42571bc8-2186-4e3b-bba9-28f5a8f364d0\") " pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.528406 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/42571bc8-2186-4e3b-bba9-28f5a8f364d0-proxy-tls\") pod \"machine-config-daemon-h28s2\" (UID: \"42571bc8-2186-4e3b-bba9-28f5a8f364d0\") " pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.528934 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.528776 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.531139 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.531305 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.531335 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.531715 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.532171 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.532354 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.532507 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.532601 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.536754 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.540481 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.540585 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.542111 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.542369 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.542842 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.543074 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.543904 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.545829 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.547633 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.547900 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.549096 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.550476 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.551386 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6wm8\" (UniqueName: \"kubernetes.io/projected/e999fc8d-dd8d-415f-b72b-1c6189cf18a9-kube-api-access-h6wm8\") pod \"node-resolver-kszgd\" (UID: \"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\") " pod="openshift-dns/node-resolver-kszgd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.552100 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.553447 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.553518 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.556619 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.557013 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.557097 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.557674 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.557688 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.558359 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.560046 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.560250 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.560339 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.560709 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.561003 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"f30a78913d6c7e7c19d8b989c20b7402901b9a4daf385278c7e922a7831dd1f9"} Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.561185 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.561299 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.561548 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.563780 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.565969 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.584313 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.585066 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.588430 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.593325 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.596372 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.601561 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.612481 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.620580 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.622763 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.622844 4798 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.622900 4798 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.622964 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623030 4798 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623083 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623132 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623186 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623242 4798 
reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623301 4798 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623357 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623429 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623496 4798 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623554 4798 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623606 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623663 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623720 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623775 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623827 4798 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623881 4798 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.623931 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624058 4798 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624120 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624176 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624241 4798 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624297 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624351 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624424 4798 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624485 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624539 4798 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624595 4798 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624647 4798 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624698 4798 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624756 4798 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624812 4798 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624868 4798 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624927 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.624979 4798 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625035 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625092 4798 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625152 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625211 4798 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625264 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625328 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625423 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625502 4798 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625580 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625635 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: 
\"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625697 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625758 4798 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625822 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625882 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625937 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.625991 4798 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626063 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626130 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626194 4798 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626248 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626335 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626412 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626470 4798 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626540 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626609 4798 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626678 4798 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626751 4798 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626826 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626899 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.626965 4798 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627028 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627086 4798 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627142 4798 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627194 4798 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627256 4798 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627316 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627371 4798 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627455 4798 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627524 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627585 4798 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627643 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627711 4798 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627779 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627834 4798 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627888 4798 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.627948 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628008 4798 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628064 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628118 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628174 4798 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628229 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628287 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628341 4798 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628418 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628485 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628540 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628597 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628654 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628711 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628768 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628836 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 
03:55:25.628908 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.628961 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629019 4798 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629075 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629134 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629193 4798 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629246 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629306 4798 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629363 4798 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629443 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629500 4798 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629558 4798 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629612 4798 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629666 4798 reconciler_common.go:293] 
"Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629719 4798 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629776 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629830 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629885 4798 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.629954 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.630018 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.630072 4798 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.630129 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.630183 4798 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.630236 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.630315 4798 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.630373 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc 
kubenswrapper[4798]: I1011 03:55:25.630454 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.630512 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.630568 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.630622 4798 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.630845 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.630907 4798 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.630965 4798 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.631032 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.631100 4798 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.631166 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.631229 4798 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.631289 4798 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.631346 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" 
Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.631512 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.631620 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.631690 4798 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.631835 4798 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.631897 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.631950 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.632004 4798 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.632059 4798 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.632134 4798 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.632214 4798 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.632281 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.632353 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.632448 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc 
kubenswrapper[4798]: I1011 03:55:25.632533 4798 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.632613 4798 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.632690 4798 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.633342 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.633751 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-p6xdd"] Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.634091 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-7fk5l"] Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.634359 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.635182 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.636628 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.636782 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.636957 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.636989 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.637088 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.638176 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.638353 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.655001 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.696564 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.701803 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.723622 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.727881 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-kszgd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734325 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-run-netns\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734437 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4b6a179c-1d23-4cb2-91b9-d34496a74456-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734524 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/bd9c0e44-3329-422a-907b-e9e9bb6194cc-multus-daemon-config\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734566 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4b6a179c-1d23-4cb2-91b9-d34496a74456-os-release\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734600 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4b6a179c-1d23-4cb2-91b9-d34496a74456-cni-binary-copy\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734633 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-multus-conf-dir\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734654 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-var-lib-kubelet\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734678 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-etc-kubernetes\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734759 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9wst\" (UniqueName: 
\"kubernetes.io/projected/bd9c0e44-3329-422a-907b-e9e9bb6194cc-kube-api-access-p9wst\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734830 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-system-cni-dir\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734856 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-os-release\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734888 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/bd9c0e44-3329-422a-907b-e9e9bb6194cc-cni-binary-copy\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734920 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-run-k8s-cni-cncf-io\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734943 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-var-lib-cni-multus\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734960 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4b6a179c-1d23-4cb2-91b9-d34496a74456-tuning-conf-dir\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.734996 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4b6a179c-1d23-4cb2-91b9-d34496a74456-cnibin\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.735043 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-cnibin\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.735076 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-var-lib-cni-bin\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.735102 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-run-multus-certs\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.735234 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4b6a179c-1d23-4cb2-91b9-d34496a74456-system-cni-dir\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.735327 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7ths\" (UniqueName: \"kubernetes.io/projected/4b6a179c-1d23-4cb2-91b9-d34496a74456-kube-api-access-w7ths\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.735380 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-hostroot\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.735438 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-multus-socket-dir-parent\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.735496 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-multus-cni-dir\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.748817 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.748884 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: W1011 03:55:25.751030 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode999fc8d_dd8d_415f_b72b_1c6189cf18a9.slice/crio-1047bfca4e917027c853191e3b345bc94992a65a3c2a2b4ef69346fbf81745f4 WatchSource:0}: Error finding container 1047bfca4e917027c853191e3b345bc94992a65a3c2a2b4ef69346fbf81745f4: Status 404 returned error can't find the container with id 1047bfca4e917027c853191e3b345bc94992a65a3c2a2b4ef69346fbf81745f4 Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.755049 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.768204 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.771472 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: W1011 03:55:25.774307 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-11c6cd4e8ce02e0d1005b9bc4a75374c7760832e8b9197a8b42b12eaa7e34c87 WatchSource:0}: Error finding container 11c6cd4e8ce02e0d1005b9bc4a75374c7760832e8b9197a8b42b12eaa7e34c87: Status 404 returned error can't find the container with id 11c6cd4e8ce02e0d1005b9bc4a75374c7760832e8b9197a8b42b12eaa7e34c87 Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.780677 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: W1011 03:55:25.783908 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod42571bc8_2186_4e3b_bba9_28f5a8f364d0.slice/crio-22688dfe574613f9fe6031353d8ddd763ad2a6cca72a126895650a4c7bccd6f1 WatchSource:0}: Error finding container 22688dfe574613f9fe6031353d8ddd763ad2a6cca72a126895650a4c7bccd6f1: Status 404 returned error can't find the container with id 22688dfe574613f9fe6031353d8ddd763ad2a6cca72a126895650a4c7bccd6f1 Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.793213 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.803446 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.817491 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.827182 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.836667 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-multus-cni-dir\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.836723 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-run-netns\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.836749 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4b6a179c-1d23-4cb2-91b9-d34496a74456-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.836787 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/bd9c0e44-3329-422a-907b-e9e9bb6194cc-multus-daemon-config\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.836811 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4b6a179c-1d23-4cb2-91b9-d34496a74456-os-release\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.836835 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4b6a179c-1d23-4cb2-91b9-d34496a74456-cni-binary-copy\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.836858 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-multus-conf-dir\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.836882 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-system-cni-dir\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.836913 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-var-lib-kubelet\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.836944 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-etc-kubernetes\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.836973 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9wst\" (UniqueName: \"kubernetes.io/projected/bd9c0e44-3329-422a-907b-e9e9bb6194cc-kube-api-access-p9wst\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837005 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-os-release\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837037 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-var-lib-cni-multus\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837067 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4b6a179c-1d23-4cb2-91b9-d34496a74456-tuning-conf-dir\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837097 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/bd9c0e44-3329-422a-907b-e9e9bb6194cc-cni-binary-copy\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837128 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-run-k8s-cni-cncf-io\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " 
pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837241 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4b6a179c-1d23-4cb2-91b9-d34496a74456-cnibin\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837280 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-var-lib-cni-bin\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837344 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-run-multus-certs\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837690 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-cnibin\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837716 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4b6a179c-1d23-4cb2-91b9-d34496a74456-system-cni-dir\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837740 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7ths\" (UniqueName: \"kubernetes.io/projected/4b6a179c-1d23-4cb2-91b9-d34496a74456-kube-api-access-w7ths\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837765 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-multus-socket-dir-parent\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837784 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-hostroot\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837960 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-hostroot\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.837742 4798 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-var-lib-kubelet\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.838233 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-etc-kubernetes\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.838430 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4b6a179c-1d23-4cb2-91b9-d34496a74456-os-release\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.838635 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-os-release\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.838698 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-var-lib-cni-multus\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.838838 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-run-multus-certs\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839119 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4b6a179c-1d23-4cb2-91b9-d34496a74456-cni-binary-copy\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839182 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-multus-conf-dir\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839234 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4b6a179c-1d23-4cb2-91b9-d34496a74456-tuning-conf-dir\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839295 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-cnibin\") pod \"multus-p6xdd\" (UID: 
\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839327 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4b6a179c-1d23-4cb2-91b9-d34496a74456-system-cni-dir\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839432 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-system-cni-dir\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839466 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-multus-socket-dir-parent\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839479 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4b6a179c-1d23-4cb2-91b9-d34496a74456-cnibin\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839509 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-run-k8s-cni-cncf-io\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839504 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-run-netns\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839530 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-multus-cni-dir\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839563 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bd9c0e44-3329-422a-907b-e9e9bb6194cc-host-var-lib-cni-bin\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839125 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.839974 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/bd9c0e44-3329-422a-907b-e9e9bb6194cc-multus-daemon-config\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.840317 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/bd9c0e44-3329-422a-907b-e9e9bb6194cc-cni-binary-copy\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.841012 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4b6a179c-1d23-4cb2-91b9-d34496a74456-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") " pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.854543 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7ths\" (UniqueName: \"kubernetes.io/projected/4b6a179c-1d23-4cb2-91b9-d34496a74456-kube-api-access-w7ths\") pod \"multus-additional-cni-plugins-7fk5l\" (UID: \"4b6a179c-1d23-4cb2-91b9-d34496a74456\") 
" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.855682 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.864651 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9wst\" (UniqueName: \"kubernetes.io/projected/bd9c0e44-3329-422a-907b-e9e9bb6194cc-kube-api-access-p9wst\") pod \"multus-p6xdd\" (UID: \"bd9c0e44-3329-422a-907b-e9e9bb6194cc\") " pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.870949 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.938513 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.938556 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.938595 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.938749 4798 projected.go:288] Couldn't get configMap 
openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.938751 4798 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.938784 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.938836 4798 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.938857 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:26.938824207 +0000 UTC m=+22.275113893 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.938881 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:26.938867648 +0000 UTC m=+22.275157334 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.938765 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.938913 4798 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.938935 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:26.938929029 +0000 UTC m=+22.275218715 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.938929 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.938971 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.938991 4798 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:25 crc kubenswrapper[4798]: E1011 03:55:25.939066 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:26.939043332 +0000 UTC m=+22.275333018 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.941801 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.945492 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.951694 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-p6xdd" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.955715 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.969689 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.976494 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Oct 11 03:55:25 crc kubenswrapper[4798]: I1011 03:55:25.985644 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.007671 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\
\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e617
3e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.026740 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.038133 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-svt4z"] Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.039080 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.039633 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.042461 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.042717 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.043090 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.042629 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.045254 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.045290 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.045455 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.046535 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: E1011 03:55:26.052643 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:55:27.052319106 +0000 UTC m=+22.388608792 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.066923 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.089433 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.098943 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.111123 4798 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.122863 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.132541 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.140869 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-systemd-units\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.140916 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-systemd\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.140935 4798 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-ovn\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.140957 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtk6h\" (UniqueName: \"kubernetes.io/projected/3c2a342b-5252-4957-9e2c-8f81c304b8af-kube-api-access-jtk6h\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141037 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-cni-bin\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141073 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141134 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-cni-netd\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141174 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-var-lib-openvswitch\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141190 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-run-ovn-kubernetes\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141206 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovnkube-script-lib\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141263 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovn-node-metrics-cert\") pod \"ovnkube-node-svt4z\" (UID: 
\"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141282 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-openvswitch\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141302 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-node-log\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141335 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-env-overrides\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141352 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-log-socket\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141374 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-kubelet\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141411 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-slash\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141431 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-run-netns\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141452 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-etc-openvswitch\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.141469 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovnkube-config\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.143941 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.158221 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.169458 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.179432 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with 
unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.194199 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 
11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.214614 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\
"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubern
etes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.225319 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.237849 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 
03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.241993 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-cni-netd\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242034 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-var-lib-openvswitch\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242058 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-run-ovn-kubernetes\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242074 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovnkube-script-lib\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242089 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovn-node-metrics-cert\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242108 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-openvswitch\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242109 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-cni-netd\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242185 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-openvswitch\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242220 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-var-lib-openvswitch\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242259 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-run-ovn-kubernetes\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242129 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-node-log\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242303 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-env-overrides\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242326 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-log-socket\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242355 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-kubelet\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242378 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-slash\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242422 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-run-netns\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242466 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-node-log\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242501 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-kubelet\") pod \"ovnkube-node-svt4z\" (UID: 
\"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242572 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-etc-openvswitch\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242915 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-log-socket\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242943 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-run-netns\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242989 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-etc-openvswitch\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242992 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-env-overrides\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.242590 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovnkube-config\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.243014 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-slash\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.243032 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovnkube-script-lib\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.243121 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-systemd-units\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 
03:55:26.243154 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-systemd\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.243176 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-ovn\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.243210 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtk6h\" (UniqueName: \"kubernetes.io/projected/3c2a342b-5252-4957-9e2c-8f81c304b8af-kube-api-access-jtk6h\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.243219 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-systemd-units\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.243226 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-systemd\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.243274 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-cni-bin\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.243270 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-ovn\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.243241 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-cni-bin\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.243344 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.243421 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.243599 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovnkube-config\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.248281 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovn-node-metrics-cert\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.251730 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T0
3:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.261614 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtk6h\" (UniqueName: \"kubernetes.io/projected/3c2a342b-5252-4957-9e2c-8f81c304b8af-kube-api-access-jtk6h\") pod \"ovnkube-node-svt4z\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.268337 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.295347 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.333873 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.364814 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.381205 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.565479 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" event={"ID":"4b6a179c-1d23-4cb2-91b9-d34496a74456","Type":"ContainerStarted","Data":"8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.565546 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" event={"ID":"4b6a179c-1d23-4cb2-91b9-d34496a74456","Type":"ContainerStarted","Data":"28c083978296acdcf7b364d20d33a0253607d14d7df0d9124f4f3b8516e968c7"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.567064 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-p6xdd" event={"ID":"bd9c0e44-3329-422a-907b-e9e9bb6194cc","Type":"ContainerStarted","Data":"a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.567127 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-p6xdd" event={"ID":"bd9c0e44-3329-422a-907b-e9e9bb6194cc","Type":"ContainerStarted","Data":"03540327f2a78fe42337b334dc79b1758451b132a84b53a772ab7ca89ed91e54"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.568844 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"c7d7ebc7a2d57aad39b176f132087445b18980bb9c3551024fd0aedb01ba11d0"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.569766 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerStarted","Data":"276a12e25ec2902ce6c28d1df642a186aaae40e269e8a02bfc45420aa26e18c7"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.571240 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.571294 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"11c6cd4e8ce02e0d1005b9bc4a75374c7760832e8b9197a8b42b12eaa7e34c87"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.572606 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-kszgd" event={"ID":"e999fc8d-dd8d-415f-b72b-1c6189cf18a9","Type":"ContainerStarted","Data":"4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.572651 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-kszgd" event={"ID":"e999fc8d-dd8d-415f-b72b-1c6189cf18a9","Type":"ContainerStarted","Data":"1047bfca4e917027c853191e3b345bc94992a65a3c2a2b4ef69346fbf81745f4"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.574448 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.574510 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.574528 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"22688dfe574613f9fe6031353d8ddd763ad2a6cca72a126895650a4c7bccd6f1"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.575972 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.576022 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba"} Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 
03:55:26.579921 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.594057 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.612802 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.680739 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.696838 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.709821 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.723976 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.735599 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.748527 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is 
after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.779610 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.820127 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.859277 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.898798 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.938733 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.974717 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.974797 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.974828 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.974863 4798 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:26 crc kubenswrapper[4798]: E1011 03:55:26.974916 4798 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:26 crc kubenswrapper[4798]: E1011 03:55:26.974949 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:26 crc kubenswrapper[4798]: E1011 03:55:26.974979 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:26 crc kubenswrapper[4798]: E1011 03:55:26.974980 4798 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:26 crc kubenswrapper[4798]: E1011 03:55:26.974995 4798 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:26 crc kubenswrapper[4798]: E1011 03:55:26.975025 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:28.975000368 +0000 UTC m=+24.311290054 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:26 crc kubenswrapper[4798]: E1011 03:55:26.975051 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:28.975035799 +0000 UTC m=+24.311325485 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:26 crc kubenswrapper[4798]: E1011 03:55:26.975071 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-10-11 03:55:28.975063769 +0000 UTC m=+24.311353455 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:26 crc kubenswrapper[4798]: E1011 03:55:26.975138 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:26 crc kubenswrapper[4798]: E1011 03:55:26.975158 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:26 crc kubenswrapper[4798]: E1011 03:55:26.975175 4798 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:26 crc kubenswrapper[4798]: E1011 03:55:26.975224 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:28.975202863 +0000 UTC m=+24.311492569 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:26 crc kubenswrapper[4798]: I1011 03:55:26.981753 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:26Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.021211 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.063094 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.075814 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:55:27 crc kubenswrapper[4798]: E1011 03:55:27.076017 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:55:29.075986633 +0000 UTC m=+24.412276319 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.111112 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.140907 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.181663 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.218728 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.272990 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.296415 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.338770 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.383112 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 
03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.421940 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.423072 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.423118 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.423090 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:27 crc kubenswrapper[4798]: E1011 03:55:27.423249 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:27 crc kubenswrapper[4798]: E1011 03:55:27.423423 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:27 crc kubenswrapper[4798]: E1011 03:55:27.423520 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.428440 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.429201 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.430385 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.431046 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.432225 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.432838 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.433537 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.434644 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.435269 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.436348 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.436907 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.438600 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.439737 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.440493 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.441202 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.441986 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.443829 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.444412 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.445104 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.446417 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.447033 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.448357 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.448927 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.450251 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.450846 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.451617 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.452820 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.453303 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.454328 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.454957 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.455804 4798 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.455907 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.457505 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.458017 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.458910 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.460595 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.461200 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.462146 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.462949 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.464009 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.464528 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.465515 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.466127 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.467232 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.467714 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.468748 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.469283 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.470826 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.471323 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.472266 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.472864 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.473948 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.474559 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.475126 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.584599 4798 generic.go:334] "Generic (PLEG): container finished" podID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerID="9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7" exitCode=0
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.584666 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7"}
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.586618 4798 generic.go:334] "Generic (PLEG): container finished" podID="4b6a179c-1d23-4cb2-91b9-d34496a74456" containerID="8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed" exitCode=0
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.586692 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" event={"ID":"4b6a179c-1d23-4cb2-91b9-d34496a74456","Type":"ContainerDied","Data":"8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed"}
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.608483 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.623858 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.637429 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"
Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.663448 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z 
is after 2025-08-24T17:21:41Z"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.678015 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.689715 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.701534 4798 status_manager.go:875] "Failed to
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.737744 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.794908 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.835294 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.870868 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 
03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.911456 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.938279 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:27 crc kubenswrapper[4798]: I1011 03:55:27.991629 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:27Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.029546 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.062742 4798 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.113072 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z 
is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.136991 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.181755 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.221851 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 
03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.261068 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.299079 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.328591 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-cwk9h"] Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.329120 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-cwk9h" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.342780 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.349779 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.369463 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.389594 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.409181 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.464486 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.491619 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfvbf\" (UniqueName: \"kubernetes.io/projected/a97c4ffd-df63-4ce6-8903-80849a23a9fa-kube-api-access-pfvbf\") pod \"node-ca-cwk9h\" (UID: \"a97c4ffd-df63-4ce6-8903-80849a23a9fa\") " pod="openshift-image-registry/node-ca-cwk9h" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.491699 4798 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a97c4ffd-df63-4ce6-8903-80849a23a9fa-host\") pod \"node-ca-cwk9h\" (UID: \"a97c4ffd-df63-4ce6-8903-80849a23a9fa\") " pod="openshift-image-registry/node-ca-cwk9h" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.491719 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a97c4ffd-df63-4ce6-8903-80849a23a9fa-serviceca\") pod \"node-ca-cwk9h\" (UID: \"a97c4ffd-df63-4ce6-8903-80849a23a9fa\") " pod="openshift-image-registry/node-ca-cwk9h" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.502495 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.540476 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.586457 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 
2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.592356 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a97c4ffd-df63-4ce6-8903-80849a23a9fa-host\") pod \"node-ca-cwk9h\" (UID: \"a97c4ffd-df63-4ce6-8903-80849a23a9fa\") " pod="openshift-image-registry/node-ca-cwk9h" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.592402 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a97c4ffd-df63-4ce6-8903-80849a23a9fa-serviceca\") pod \"node-ca-cwk9h\" (UID: \"a97c4ffd-df63-4ce6-8903-80849a23a9fa\") " pod="openshift-image-registry/node-ca-cwk9h" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.592455 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfvbf\" (UniqueName: \"kubernetes.io/projected/a97c4ffd-df63-4ce6-8903-80849a23a9fa-kube-api-access-pfvbf\") pod \"node-ca-cwk9h\" (UID: \"a97c4ffd-df63-4ce6-8903-80849a23a9fa\") " pod="openshift-image-registry/node-ca-cwk9h" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.593884 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a97c4ffd-df63-4ce6-8903-80849a23a9fa-serviceca\") pod \"node-ca-cwk9h\" (UID: \"a97c4ffd-df63-4ce6-8903-80849a23a9fa\") " pod="openshift-image-registry/node-ca-cwk9h" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.593888 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a97c4ffd-df63-4ce6-8903-80849a23a9fa-host\") pod \"node-ca-cwk9h\" (UID: \"a97c4ffd-df63-4ce6-8903-80849a23a9fa\") " pod="openshift-image-registry/node-ca-cwk9h" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.594866 4798 generic.go:334] "Generic (PLEG): container finished" podID="4b6a179c-1d23-4cb2-91b9-d34496a74456" containerID="ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7" exitCode=0 Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.594933 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" event={"ID":"4b6a179c-1d23-4cb2-91b9-d34496a74456","Type":"ContainerDied","Data":"ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7"} Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.601628 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerStarted","Data":"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"} Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.601718 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerStarted","Data":"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2"} Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.601751 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerStarted","Data":"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d"} Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.601777 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" 
event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerStarted","Data":"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc"} Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.601812 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerStarted","Data":"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477"} Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.601837 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerStarted","Data":"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136"} Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.634815 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfvbf\" (UniqueName: \"kubernetes.io/projected/a97c4ffd-df63-4ce6-8903-80849a23a9fa-kube-api-access-pfvbf\") pod \"node-ca-cwk9h\" (UID: \"a97c4ffd-df63-4ce6-8903-80849a23a9fa\") " pod="openshift-image-registry/node-ca-cwk9h" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.642632 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 
03:55:28.642848 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-cwk9h" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.683920 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\
\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.717799 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.757185 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.801908 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc 
kubenswrapper[4798]: I1011 03:55:28.846957 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"Po
dInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.875501 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.921454 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 
03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:28 crc kubenswrapper[4798]: I1011 03:55:28.962718 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:28.999029 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:28.999096 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:28.999129 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:28.999153 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod 
\"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:28.999296 4798 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:28.999324 4798 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:28.999433 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:32.999363522 +0000 UTC m=+28.335653208 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:28.999451 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:28.999491 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:28.999499 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:32.999464974 +0000 UTC m=+28.335754680 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:28.999517 4798 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:28.999557 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:28.999575 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:32.999556236 +0000 UTC m=+28.335845952 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:28.999576 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:28.999610 4798 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:28.999652 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:32.999638568 +0000 UTC m=+28.335928284 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.001288 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:28Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.044190 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.077596 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.100455 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:29.100668 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:55:33.100639754 +0000 UTC m=+28.436929440 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.119176 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.161314 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.201765 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.236848 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.279469 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.324967 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 
03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.358198 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.397418 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.422670 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.422692 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:29.422966 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:29.422845 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.423049 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:29 crc kubenswrapper[4798]: E1011 03:55:29.423250 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.437646 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.478801 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.520361 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.565975 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics 
northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"
host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.599495 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.608468 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9"} Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.611050 4798 generic.go:334] "Generic (PLEG): container finished" podID="4b6a179c-1d23-4cb2-91b9-d34496a74456" containerID="b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034" exitCode=0 Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.611135 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" 
event={"ID":"4b6a179c-1d23-4cb2-91b9-d34496a74456","Type":"ContainerDied","Data":"b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034"} Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.613368 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-cwk9h" event={"ID":"a97c4ffd-df63-4ce6-8903-80849a23a9fa","Type":"ContainerStarted","Data":"3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7"} Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.613450 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-cwk9h" event={"ID":"a97c4ffd-df63-4ce6-8903-80849a23a9fa","Type":"ContainerStarted","Data":"1ef42864a9d55c7ac82c3f69af7749e8517d66ec09ed1d50b02e0b79d564041e"} Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.642679 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.679346 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.717943 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.759072 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.800591 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.846495 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z 
is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.878069 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.919553 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPa
th\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.958947 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0
a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:29 crc kubenswrapper[4798]: I1011 03:55:29.999267 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:29Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.043118 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.081764 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.125618 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.163038 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.201871 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.240747 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.618043 4798 generic.go:334] "Generic (PLEG): container finished" podID="4b6a179c-1d23-4cb2-91b9-d34496a74456" containerID="5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf" exitCode=0 Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.618109 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" event={"ID":"4b6a179c-1d23-4cb2-91b9-d34496a74456","Type":"ContainerDied","Data":"5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf"} Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.624011 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerStarted","Data":"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"} Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.656001 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z 
is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.669058 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.680662 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.694852 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.709323 4798 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c07
81a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.720281 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.727941 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.737761 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.750632 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 
03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.762289 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.773524 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.784435 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.796595 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:30 crc kubenswrapper[4798]: I1011 03:55:30.811108 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:30Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.422597 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:31 crc kubenswrapper[4798]: E1011 03:55:31.423115 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.422776 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:31 crc kubenswrapper[4798]: E1011 03:55:31.423210 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.422630 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:31 crc kubenswrapper[4798]: E1011 03:55:31.423271 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.481310 4798 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.484590 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.484640 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.484652 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.484804 4798 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.493867 4798 kubelet_node_status.go:115] "Node was previously registered" node="crc" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.494175 4798 kubelet_node_status.go:79] "Successfully registered node" node="crc" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.495623 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.495724 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.495782 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.495845 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.495912 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:31Z","lastTransitionTime":"2025-10-11T03:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:31 crc kubenswrapper[4798]: E1011 03:55:31.520022 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.524172 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.524225 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.524261 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.524284 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.524300 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:31Z","lastTransitionTime":"2025-10-11T03:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:31 crc kubenswrapper[4798]: E1011 03:55:31.542165 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.547137 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.547202 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.547221 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.547246 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.547261 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:31Z","lastTransitionTime":"2025-10-11T03:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:31 crc kubenswrapper[4798]: E1011 03:55:31.559199 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.563220 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.563264 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.563278 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.563301 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.563317 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:31Z","lastTransitionTime":"2025-10-11T03:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:31 crc kubenswrapper[4798]: E1011 03:55:31.578261 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.582808 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.582856 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.582870 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.582896 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.582914 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:31Z","lastTransitionTime":"2025-10-11T03:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:31 crc kubenswrapper[4798]: E1011 03:55:31.599042 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: E1011 03:55:31.599220 4798 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.601207 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.601281 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.601300 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.601326 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.601347 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:31Z","lastTransitionTime":"2025-10-11T03:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.632514 4798 generic.go:334] "Generic (PLEG): container finished" podID="4b6a179c-1d23-4cb2-91b9-d34496a74456" containerID="997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b" exitCode=0 Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.632653 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" event={"ID":"4b6a179c-1d23-4cb2-91b9-d34496a74456","Type":"ContainerDied","Data":"997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b"} Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.649536 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.670520 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.683408 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.696999 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.703922 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.703952 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.703962 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.703977 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.703987 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:31Z","lastTransitionTime":"2025-10-11T03:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.712972 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.727160 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":
true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\
"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.754117 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.766254 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\
" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.779959 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.793244 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 
03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.806461 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.806501 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.806511 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.806530 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.806542 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:31Z","lastTransitionTime":"2025-10-11T03:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.809188 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267f
b132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.824480 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.839931 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.853848 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:31Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.911814 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.911962 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.912038 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.912104 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:31 crc kubenswrapper[4798]: I1011 03:55:31.912174 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:31Z","lastTransitionTime":"2025-10-11T03:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.016021 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.016069 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.016082 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.016100 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.016110 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:32Z","lastTransitionTime":"2025-10-11T03:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.123796 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.123851 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.123904 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.123926 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.123943 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:32Z","lastTransitionTime":"2025-10-11T03:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.228800 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.228853 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.228864 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.228883 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.228894 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:32Z","lastTransitionTime":"2025-10-11T03:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.332843 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.332902 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.332918 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.332959 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.332975 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:32Z","lastTransitionTime":"2025-10-11T03:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.435977 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.436023 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.436034 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.436050 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.436063 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:32Z","lastTransitionTime":"2025-10-11T03:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.538923 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.538964 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.538977 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.538998 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.539011 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:32Z","lastTransitionTime":"2025-10-11T03:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.641464 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.641900 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.641912 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.641928 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.641937 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:32Z","lastTransitionTime":"2025-10-11T03:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.645558 4798 generic.go:334] "Generic (PLEG): container finished" podID="4b6a179c-1d23-4cb2-91b9-d34496a74456" containerID="20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136" exitCode=0 Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.645587 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" event={"ID":"4b6a179c-1d23-4cb2-91b9-d34496a74456","Type":"ContainerDied","Data":"20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136"} Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.663283 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:32Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.682008 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:32Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.697628 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:32Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.713342 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:32Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.729357 4798 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a168
8df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"
/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:32Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.744462 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.744491 4798 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.744500 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.744516 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.744542 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:32Z","lastTransitionTime":"2025-10-11T03:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.750614 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:32Z 
is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.766298 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:32Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.785449 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPa
th\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:32Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.800473 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0
a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:32Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.814589 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:32Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.829542 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:32Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.840882 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:32Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.846470 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.846513 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.846525 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.846543 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.846554 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:32Z","lastTransitionTime":"2025-10-11T03:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.854593 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:32Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.870669 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:32Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.949529 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.949579 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.949594 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.949610 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:32 crc kubenswrapper[4798]: I1011 03:55:32.949618 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:32Z","lastTransitionTime":"2025-10-11T03:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.044900 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.044948 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.044966 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.044987 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.045063 4798 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.045112 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:41.045095829 +0000 UTC m=+36.381385515 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.045152 4798 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.045277 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.045309 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.045368 4798 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.045280 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:41.045247762 +0000 UTC m=+36.381537448 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.045176 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.045435 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.045451 4798 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.045455 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:41.045432246 +0000 UTC m=+36.381722122 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.045487 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:41.045476437 +0000 UTC m=+36.381766343 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.052717 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.052764 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.052777 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.052798 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.052812 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:33Z","lastTransitionTime":"2025-10-11T03:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.146605 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.146774 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:55:41.14674913 +0000 UTC m=+36.483038816 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.156316 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.156435 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.156462 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.156501 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.156528 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:33Z","lastTransitionTime":"2025-10-11T03:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.263138 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.263195 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.263214 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.263236 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.263261 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:33Z","lastTransitionTime":"2025-10-11T03:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.366575 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.366635 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.366653 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.366679 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.366697 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:33Z","lastTransitionTime":"2025-10-11T03:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.423217 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.423606 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.424496 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.424669 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.424767 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:33 crc kubenswrapper[4798]: E1011 03:55:33.424880 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.469850 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.469927 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.469944 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.469964 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.469975 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:33Z","lastTransitionTime":"2025-10-11T03:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.573240 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.573302 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.573315 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.573339 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.573357 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:33Z","lastTransitionTime":"2025-10-11T03:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.654958 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerStarted","Data":"f74df615a5e83aeb9df24b50315da33c1d6d05d5c25198463d48b0463966d797"} Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.656259 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.656356 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.669983 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" event={"ID":"4b6a179c-1d23-4cb2-91b9-d34496a74456","Type":"ContainerStarted","Data":"5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db"} Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.673793 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\"
,\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.675419 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.675530 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.675607 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.675682 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.675779 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:33Z","lastTransitionTime":"2025-10-11T03:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.689657 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.691485 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.692505 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.703022 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.714561 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.729069 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/et
c/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\
":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.746999 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\
\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f74df615a5e83aeb9df24b50315da33c1d6d05d5c25198463d48b0463966d797\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\
\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.762659 4798 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.778844 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers 
with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.779178 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.779219 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.779233 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.779250 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.779261 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:33Z","lastTransitionTime":"2025-10-11T03:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.792063 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.805189 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.818314 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.829330 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.844218 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.861772 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 
03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.876184 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.882393 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.882456 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.882471 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.882494 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.882506 4798 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:33Z","lastTransitionTime":"2025-10-11T03:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.893332 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube
rnetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.911778 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.926253 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.942224 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.961891 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f74df615a5e83aeb9df24b50315da33c1d6d05d5
c25198463d48b0463966d797\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.976905 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.985798 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.985834 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.985843 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.985858 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.985869 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:33Z","lastTransitionTime":"2025-10-11T03:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:33 crc kubenswrapper[4798]: I1011 03:55:33.991589 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f
7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.009228 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:34Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.021780 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-11T03:55:34Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.038003 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:34Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.048651 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:34Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.063050 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:34Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.077269 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:34Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.088152 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.088195 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.088203 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.088218 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.088227 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:34Z","lastTransitionTime":"2025-10-11T03:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.191058 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.191418 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.191498 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.191568 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.191638 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:34Z","lastTransitionTime":"2025-10-11T03:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.294717 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.295067 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.295224 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.295371 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.295584 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:34Z","lastTransitionTime":"2025-10-11T03:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.397767 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.397894 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.397914 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.397934 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.397946 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:34Z","lastTransitionTime":"2025-10-11T03:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.499888 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.499938 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.499953 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.499973 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.499989 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:34Z","lastTransitionTime":"2025-10-11T03:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.602463 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.602527 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.602544 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.602568 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.602586 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:34Z","lastTransitionTime":"2025-10-11T03:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.674477 4798 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.705599 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.705652 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.705668 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.705685 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.705699 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:34Z","lastTransitionTime":"2025-10-11T03:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.808582 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.808624 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.808633 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.808648 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.808658 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:34Z","lastTransitionTime":"2025-10-11T03:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.911075 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.911313 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.911388 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.911514 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:34 crc kubenswrapper[4798]: I1011 03:55:34.911588 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:34Z","lastTransitionTime":"2025-10-11T03:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.013976 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.014288 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.014405 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.014484 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.014557 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:35Z","lastTransitionTime":"2025-10-11T03:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.116935 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.117215 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.117343 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.117491 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.117590 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:35Z","lastTransitionTime":"2025-10-11T03:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.220633 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.220688 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.220702 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.220720 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.220732 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:35Z","lastTransitionTime":"2025-10-11T03:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.323345 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.323641 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.323717 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.323785 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.323848 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:35Z","lastTransitionTime":"2025-10-11T03:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.406503 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.419547 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.422498 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.422503 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.422907 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:35 crc kubenswrapper[4798]: E1011 03:55:35.423009 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:35 crc kubenswrapper[4798]: E1011 03:55:35.423158 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:35 crc kubenswrapper[4798]: E1011 03:55:35.423376 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.425931 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.425967 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.425998 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.426017 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.426029 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:35Z","lastTransitionTime":"2025-10-11T03:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.437791 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.454920 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 
2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.481187 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d
d63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-rel
ease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f74df615a5e83aeb9df24b50315da33c1d6d05d5c25198463d48b0463966d797\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPa
th\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.502317 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.518755 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.528885 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.528921 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.528930 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.528943 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.528953 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:35Z","lastTransitionTime":"2025-10-11T03:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.530026 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.542474 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.554954 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.564143 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.575682 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.585862 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.597050 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.607405 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.618148 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.630524 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.631597 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.631626 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.631635 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.631649 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.631659 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:35Z","lastTransitionTime":"2025-10-11T03:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.643271 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.653264 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.668297 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.676756 4798 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.684799 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f74df615a5e83aeb9df24b50315da33c1d6d05d5
c25198463d48b0463966d797\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.694068 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.707614 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.719543 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.729773 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.732935 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.732965 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.732973 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.732986 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.732994 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:35Z","lastTransitionTime":"2025-10-11T03:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.742302 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.751199 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.763881 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.775460 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.836235 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.836272 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.836281 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.836296 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.836306 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:35Z","lastTransitionTime":"2025-10-11T03:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.938568 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.938606 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.938615 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.938627 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:35 crc kubenswrapper[4798]: I1011 03:55:35.938636 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:35Z","lastTransitionTime":"2025-10-11T03:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.041035 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.041075 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.041086 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.041103 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.041120 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:36Z","lastTransitionTime":"2025-10-11T03:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.143745 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.143777 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.143786 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.143801 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.143810 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:36Z","lastTransitionTime":"2025-10-11T03:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.246176 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.246212 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.246224 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.246240 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.246252 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:36Z","lastTransitionTime":"2025-10-11T03:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.348338 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.348378 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.348422 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.348445 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.348462 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:36Z","lastTransitionTime":"2025-10-11T03:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.450852 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.450923 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.450947 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.450971 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.450989 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:36Z","lastTransitionTime":"2025-10-11T03:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.553703 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.553758 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.553769 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.553787 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.553801 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:36Z","lastTransitionTime":"2025-10-11T03:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.656310 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.656367 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.656376 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.656411 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.656422 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:36Z","lastTransitionTime":"2025-10-11T03:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.681953 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/0.log"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.685516 4798 generic.go:334] "Generic (PLEG): container finished" podID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerID="f74df615a5e83aeb9df24b50315da33c1d6d05d5c25198463d48b0463966d797" exitCode=1
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.685563 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"f74df615a5e83aeb9df24b50315da33c1d6d05d5c25198463d48b0463966d797"}
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.686221 4798 scope.go:117] "RemoveContainer" containerID="f74df615a5e83aeb9df24b50315da33c1d6d05d5c25198463d48b0463966d797"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.701806 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.716364 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.730807 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.746027 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.756960 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.758096 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.758141 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.758155 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.758171 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.758183 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:36Z","lastTransitionTime":"2025-10-11T03:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.776636 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.798482 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f74df615a5e83aeb9df24b50315da33c1d6d05d5c25198463d48b0463966d797\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f74df615a5e83aeb9df24b50315da33c1d6d05d5c25198463d48b0463966d797\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"message\\\":\\\"val\\\\nI1011 03:55:35.886832 6105 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1011 03:55:35.886853 6105 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1011 03:55:35.887159 6105 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1011 03:55:35.887199 6105 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1011 03:55:35.887235 6105 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1011 03:55:35.887241 6105 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1011 03:55:35.887242 6105 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1011 03:55:35.887256 6105 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1011 03:55:35.887323 6105 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1011 03:55:35.887361 6105 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1011 03:55:35.887385 6105 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1011 03:55:35.887372 6105 handler.go:208] Removed *v1.Node event handler 2\\\\nI1011 03:55:35.887443 6105 handler.go:208] Removed *v1.Node event handler 7\\\\nI1011 03:55:35.887509 6105 factory.go:656] Stopping watch factory\\\\nI1011 03:55:35.887539 6105 ovnkube.go:599] Stopped ovnkube\\\\nI1011 03:55:35.887518 6105 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.809194 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.825015 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.838846 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.852263 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.859942 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.859978 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.859992 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.860011 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.860022 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:36Z","lastTransitionTime":"2025-10-11T03:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.868093 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.881152 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.894329 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:36Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.962451 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.962514 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.962524 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.962559 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:36 crc kubenswrapper[4798]: I1011 03:55:36.962572 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:36Z","lastTransitionTime":"2025-10-11T03:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.065719 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.065763 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.065772 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.065791 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.065803 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:37Z","lastTransitionTime":"2025-10-11T03:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.168236 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.168266 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.168275 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.168288 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.168298 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:37Z","lastTransitionTime":"2025-10-11T03:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.270468 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.270499 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.270507 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.270519 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.270528 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:37Z","lastTransitionTime":"2025-10-11T03:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.372886 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.372930 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.372942 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.372959 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.372971 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:37Z","lastTransitionTime":"2025-10-11T03:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.422965 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:37 crc kubenswrapper[4798]: E1011 03:55:37.423106 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.423526 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:37 crc kubenswrapper[4798]: E1011 03:55:37.423585 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.423638 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:37 crc kubenswrapper[4798]: E1011 03:55:37.423690 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.475330 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.475358 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.475366 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.475379 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.475388 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:37Z","lastTransitionTime":"2025-10-11T03:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.578526 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.579171 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.579239 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.579329 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.579384 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:37Z","lastTransitionTime":"2025-10-11T03:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.681965 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.682237 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.682299 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.682357 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.682448 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:37Z","lastTransitionTime":"2025-10-11T03:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.689614 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/0.log" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.691976 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerStarted","Data":"0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242"} Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.692160 4798 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.706466 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.720238 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.733182 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.747063 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.760606 4798 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.784257 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.784299 4798 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.784308 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.784324 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.784334 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:37Z","lastTransitionTime":"2025-10-11T03:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.793206 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf716
9264c931008c3f639d938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f74df615a5e83aeb9df24b50315da33c1d6d05d5c25198463d48b0463966d797\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"message\\\":\\\"val\\\\nI1011 03:55:35.886832 6105 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1011 03:55:35.886853 6105 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1011 03:55:35.887159 6105 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1011 03:55:35.887199 6105 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1011 03:55:35.887235 6105 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1011 03:55:35.887241 6105 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1011 03:55:35.887242 6105 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1011 03:55:35.887256 6105 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1011 03:55:35.887323 6105 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1011 03:55:35.887361 6105 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1011 03:55:35.887385 6105 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1011 03:55:35.887372 6105 handler.go:208] Removed *v1.Node event handler 2\\\\nI1011 03:55:35.887443 6105 handler.go:208] Removed *v1.Node event handler 7\\\\nI1011 03:55:35.887509 6105 factory.go:656] Stopping watch factory\\\\nI1011 03:55:35.887539 6105 ovnkube.go:599] Stopped ovnkube\\\\nI1011 03:55:35.887518 6105 handler.go:208] Removed *v1.Pod event handler 
6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\
\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.806099 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.820508 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.842140 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.859445 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.873612 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.883409 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.886879 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.886921 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.886933 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.886950 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.886964 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:37Z","lastTransitionTime":"2025-10-11T03:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.897047 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.911952 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:37Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.990602 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.991114 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.991312 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.991543 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:37 crc kubenswrapper[4798]: I1011 03:55:37.991679 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:37Z","lastTransitionTime":"2025-10-11T03:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.094501 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.094579 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.094599 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.094628 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.094645 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:38Z","lastTransitionTime":"2025-10-11T03:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.197490 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.197548 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.197563 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.197588 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.197607 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:38Z","lastTransitionTime":"2025-10-11T03:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.300839 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.300920 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.300942 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.300979 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.301007 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:38Z","lastTransitionTime":"2025-10-11T03:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.404429 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.404469 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.404481 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.404497 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.404508 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:38Z","lastTransitionTime":"2025-10-11T03:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.467060 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.507653 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.507713 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.507731 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.507754 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.507772 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:38Z","lastTransitionTime":"2025-10-11T03:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.611040 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.611093 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.611104 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.611126 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.611144 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:38Z","lastTransitionTime":"2025-10-11T03:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.697226 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/1.log"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.697835 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/0.log"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.701301 4798 generic.go:334] "Generic (PLEG): container finished" podID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerID="0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242" exitCode=1
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.701443 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242"}
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.701496 4798 scope.go:117] "RemoveContainer" containerID="f74df615a5e83aeb9df24b50315da33c1d6d05d5c25198463d48b0463966d797"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.701942 4798 scope.go:117] "RemoveContainer" containerID="0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242"
Oct 11 03:55:38 crc kubenswrapper[4798]: E1011 03:55:38.702175 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.723530 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.723575 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.723584 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.723598 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.723608 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:38Z","lastTransitionTime":"2025-10-11T03:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.730438 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.745760 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.761251 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.761947 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs"] Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.762551 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.766537 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.766944 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.783035 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts
\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{
\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f74df615a5e83aeb9df24b50315da33c1d6d05d5c25198463d48b0463966d797\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"message\\\":\\\"val\\\\nI1011 03:55:35.886832 6105 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1011 03:55:35.886853 6105 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1011 03:55:35.887159 6105 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1011 03:55:35.887199 6105 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1011 03:55:35.887235 6105 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1011 03:55:35.887241 6105 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1011 03:55:35.887242 6105 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1011 03:55:35.887256 6105 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1011 03:55:35.887323 6105 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1011 03:55:35.887361 6105 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1011 03:55:35.887385 6105 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1011 03:55:35.887372 6105 handler.go:208] Removed *v1.Node event handler 2\\\\nI1011 03:55:35.887443 6105 handler.go:208] Removed *v1.Node event handler 7\\\\nI1011 03:55:35.887509 6105 factory.go:656] Stopping watch factory\\\\nI1011 03:55:35.887539 6105 
ovnkube.go:599] Stopped ovnkube\\\\nI1011 03:55:35.887518 6105 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"message\\\":\\\"gressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1011 03:55:37.822041 6251 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1011 03:55:37.822100 6251 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1011 03:55:37.822166 6251 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1011 03:55:37.822165 6251 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1011 03:55:37.822204 6251 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1011 03:55:37.822214 6251 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1011 03:55:37.822229 6251 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1011 03:55:37.822237 6251 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1011 03:55:37.822241 6251 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1011 03:55:37.822251 6251 factory.go:656] Stopping watch factory\\\\nI1011 03:55:37.822260 6251 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1011 03:55:37.822266 6251 ovnkube.go:599] Stopped ovnkube\\\\nI1011 03:55:37.822269 6251 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1011 03:55:37.822277 6251 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1011 03:55:37.822302 6251 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1011 
03:55:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.792981 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.804923 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.818171 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.826076 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.826114 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.826126 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.826141 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.826151 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:38Z","lastTransitionTime":"2025-10-11T03:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.833458 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.846503 4798 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.855691 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.868192 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.880702 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/o
penshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.891866 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.901932 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.903948 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/89cbb20d-9fdb-4fed-9ad2-acb1207afc46-env-overrides\") pod \"ovnkube-control-plane-749d76644c-5sxhs\" (UID: \"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.903978 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7jvd\" (UniqueName: \"kubernetes.io/projected/89cbb20d-9fdb-4fed-9ad2-acb1207afc46-kube-api-access-z7jvd\") pod \"ovnkube-control-plane-749d76644c-5sxhs\" (UID: \"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.903998 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/89cbb20d-9fdb-4fed-9ad2-acb1207afc46-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-5sxhs\" (UID: \"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.904031 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/89cbb20d-9fdb-4fed-9ad2-acb1207afc46-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-5sxhs\" (UID: \"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.911989 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.926615 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.927934 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.927976 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.927988 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.928004 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.928015 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:38Z","lastTransitionTime":"2025-10-11T03:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.939022 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.950503 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.962031 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.972069 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:38 crc kubenswrapper[4798]: I1011 03:55:38.986013 4798 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:38Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.003480 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f74df615a5e83aeb9df24b50315da33c1d6d05d5c25198463d48b0463966d797\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"message\\\":\\\"val\\\\nI1011 03:55:35.886832 6105 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1011 03:55:35.886853 6105 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1011 03:55:35.887159 6105 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1011 03:55:35.887199 6105 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1011 03:55:35.887235 6105 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1011 03:55:35.887241 6105 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1011 03:55:35.887242 6105 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1011 03:55:35.887256 6105 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1011 03:55:35.887323 6105 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1011 03:55:35.887361 6105 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1011 03:55:35.887385 6105 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1011 03:55:35.887372 6105 handler.go:208] Removed *v1.Node event handler 2\\\\nI1011 03:55:35.887443 6105 handler.go:208] Removed *v1.Node event handler 7\\\\nI1011 03:55:35.887509 6105 factory.go:656] Stopping watch factory\\\\nI1011 03:55:35.887539 6105 ovnkube.go:599] Stopped ovnkube\\\\nI1011 03:55:35.887518 6105 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"message\\\":\\\"gressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1011 03:55:37.822041 6251 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1011 
03:55:37.822100 6251 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1011 03:55:37.822166 6251 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1011 03:55:37.822165 6251 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1011 03:55:37.822204 6251 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1011 03:55:37.822214 6251 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1011 03:55:37.822229 6251 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1011 03:55:37.822237 6251 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1011 03:55:37.822241 6251 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1011 03:55:37.822251 6251 factory.go:656] Stopping watch factory\\\\nI1011 03:55:37.822260 6251 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1011 03:55:37.822266 6251 ovnkube.go:599] Stopped ovnkube\\\\nI1011 03:55:37.822269 6251 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1011 03:55:37.822277 6251 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1011 03:55:37.822302 6251 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1011 03:55:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"ima
geID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.005750 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/89cbb20d-9fdb-4fed-9ad2-acb1207afc46-env-overrides\") pod \"ovnkube-control-plane-749d76644c-5sxhs\" (UID: \"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.005785 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7jvd\" (UniqueName: \"kubernetes.io/projected/89cbb20d-9fdb-4fed-9ad2-acb1207afc46-kube-api-access-z7jvd\") pod \"ovnkube-control-plane-749d76644c-5sxhs\" (UID: \"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 
03:55:39.005844 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/89cbb20d-9fdb-4fed-9ad2-acb1207afc46-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-5sxhs\" (UID: \"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.005868 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/89cbb20d-9fdb-4fed-9ad2-acb1207afc46-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-5sxhs\" (UID: \"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.006307 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/89cbb20d-9fdb-4fed-9ad2-acb1207afc46-env-overrides\") pod \"ovnkube-control-plane-749d76644c-5sxhs\" (UID: \"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.006631 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/89cbb20d-9fdb-4fed-9ad2-acb1207afc46-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-5sxhs\" (UID: \"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.013596 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/89cbb20d-9fdb-4fed-9ad2-acb1207afc46-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-5sxhs\" (UID: \"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.013744 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.021879 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7jvd\" (UniqueName: \"kubernetes.io/projected/89cbb20d-9fdb-4fed-9ad2-acb1207afc46-kube-api-access-z7jvd\") pod \"ovnkube-control-plane-749d76644c-5sxhs\" (UID: \"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.027313 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.030329 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.030351 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.030360 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.030376 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.030385 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:39Z","lastTransitionTime":"2025-10-11T03:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.040842 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.051672 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.062553 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.070598 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.078243 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.082557 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: W1011 03:55:39.091323 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89cbb20d_9fdb_4fed_9ad2_acb1207afc46.slice/crio-77de730368a0c0a499b8cf86db05c308dad0604b036216621bfb1afe5ce95c48 WatchSource:0}: Error finding container 77de730368a0c0a499b8cf86db05c308dad0604b036216621bfb1afe5ce95c48: Status 404 returned error can't find the container with id 77de730368a0c0a499b8cf86db05c308dad0604b036216621bfb1afe5ce95c48 Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.135687 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.135728 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" 
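Every "Failed to update status for pod" entry in this window shares one root cause: the pod.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743 is serving a certificate that expired 2025-08-24T17:21:41Z, while the node clock reads 2025-10-11T03:55:39Z, so every status patch the kubelet posts is rejected at the TLS layer before the API server ever sees it. The Go sketch below is an illustrative diagnostic only (it is not part of the captured tooling): it fetches the webhook's serving certificate from the endpoint named in the log and prints its validity window, assuming that endpoint is reachable from the node. The usual remediation in this situation is certificate rotation by the cluster itself; the sketch merely confirms the diagnosis.

```go
// certcheck: print the validity window of the TLS certificate served by the
// webhook endpoint reported in the kubelet log above (an assumed address,
// taken from the log, not a fixed constant of the product).
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// InsecureSkipVerify lets us retrieve the certificate even though
	// verification would fail -- the expired cert is the condition being
	// diagnosed, so we must not abort on it.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("TLS dial failed: %v", err)
	}
	defer conn.Close()

	now := time.Now()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%q notBefore=%s notAfter=%s expired=%v\n",
			cert.Subject.String(),
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339),
			now.After(cert.NotAfter))
	}
}
```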
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.135737 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.135753 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.135762 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:39Z","lastTransitionTime":"2025-10-11T03:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.239429 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.239475 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.239485 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.239501 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.239514 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:39Z","lastTransitionTime":"2025-10-11T03:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.341966 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.342018 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.342031 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.342050 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.342062 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:39Z","lastTransitionTime":"2025-10-11T03:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.422830 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.422995 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:39 crc kubenswrapper[4798]: E1011 03:55:39.423093 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:39 crc kubenswrapper[4798]: E1011 03:55:39.423323 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.423597 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:39 crc kubenswrapper[4798]: E1011 03:55:39.423710 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.444211 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.444258 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.444267 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.444282 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.444291 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:39Z","lastTransitionTime":"2025-10-11T03:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.486213 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-5bfzt"] Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.486805 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:39 crc kubenswrapper[4798]: E1011 03:55:39.486865 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.498239 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.511000 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.526094 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.545914 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.546901 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.546927 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.546938 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.546957 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.546971 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:39Z","lastTransitionTime":"2025-10-11T03:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.557897 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.573733 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.596676 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.610996 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jdt7\" (UniqueName: \"kubernetes.io/projected/3cf2b185-9a26-4448-8fc5-4885f98daf87-kube-api-access-7jdt7\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.611065 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.612959 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.631691 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.646692 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.650241 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.650280 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.650292 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.650310 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.650323 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:39Z","lastTransitionTime":"2025-10-11T03:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.660825 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.672330 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.686191 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.699044 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.706890 4798 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" event={"ID":"89cbb20d-9fdb-4fed-9ad2-acb1207afc46","Type":"ContainerStarted","Data":"2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c"} Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.706965 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" event={"ID":"89cbb20d-9fdb-4fed-9ad2-acb1207afc46","Type":"ContainerStarted","Data":"6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9"} Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.706980 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" event={"ID":"89cbb20d-9fdb-4fed-9ad2-acb1207afc46","Type":"ContainerStarted","Data":"77de730368a0c0a499b8cf86db05c308dad0604b036216621bfb1afe5ce95c48"} Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.709468 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/1.log" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.711510 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jdt7\" (UniqueName: \"kubernetes.io/projected/3cf2b185-9a26-4448-8fc5-4885f98daf87-kube-api-access-7jdt7\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.711569 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:39 crc kubenswrapper[4798]: E1011 03:55:39.711667 4798 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 03:55:39 crc kubenswrapper[4798]: E1011 03:55:39.711709 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs podName:3cf2b185-9a26-4448-8fc5-4885f98daf87 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:40.211694849 +0000 UTC m=+35.547984535 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs") pod "network-metrics-daemon-5bfzt" (UID: "3cf2b185-9a26-4448-8fc5-4885f98daf87") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.713380 4798 scope.go:117] "RemoveContainer" containerID="0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242" Oct 11 03:55:39 crc kubenswrapper[4798]: E1011 03:55:39.713560 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.718601 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false
,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"contain
erID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-d
ir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.732957 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jdt7\" (UniqueName: \"kubernetes.io/projected/3cf2b185-9a26-4448-8fc5-4885f98daf87-kube-api-access-7jdt7\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.736854 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf716
9264c931008c3f639d938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f74df615a5e83aeb9df24b50315da33c1d6d05d5c25198463d48b0463966d797\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"message\\\":\\\"val\\\\nI1011 03:55:35.886832 6105 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1011 03:55:35.886853 6105 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1011 03:55:35.887159 6105 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1011 03:55:35.887199 6105 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1011 03:55:35.887235 6105 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1011 03:55:35.887241 6105 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1011 03:55:35.887242 6105 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1011 03:55:35.887256 6105 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1011 03:55:35.887323 6105 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1011 03:55:35.887361 6105 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1011 03:55:35.887385 6105 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1011 03:55:35.887372 6105 handler.go:208] Removed *v1.Node event handler 2\\\\nI1011 03:55:35.887443 6105 handler.go:208] Removed *v1.Node event handler 7\\\\nI1011 03:55:35.887509 6105 factory.go:656] Stopping watch factory\\\\nI1011 03:55:35.887539 6105 ovnkube.go:599] Stopped ovnkube\\\\nI1011 03:55:35.887518 6105 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"message\\\":\\\"gressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1011 03:55:37.822041 6251 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1011 03:55:37.822100 6251 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1011 03:55:37.822166 6251 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1011 03:55:37.822165 6251 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1011 03:55:37.822204 6251 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1011 03:55:37.822214 6251 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1011 03:55:37.822229 6251 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1011 03:55:37.822237 6251 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1011 03:55:37.822241 6251 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1011 03:55:37.822251 6251 factory.go:656] Stopping watch factory\\\\nI1011 03:55:37.822260 6251 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1011 03:55:37.822266 6251 ovnkube.go:599] Stopped ovnkube\\\\nI1011 03:55:37.822269 6251 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1011 
03:55:37.822277 6251 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1011 03:55:37.822302 6251 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1011 03:55:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\
":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.748802 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.752586 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.752612 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.752621 4798 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.752633 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.752642 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:39Z","lastTransitionTime":"2025-10-11T03:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.760109 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.771944 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.786518 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.803757 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.817168 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"
ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.833543 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451
ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime
\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.850126 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ov
nkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"message\\\":\\\"gressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1011 03:55:37.822041 6251 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1011 03:55:37.822100 6251 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1011 03:55:37.822166 6251 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1011 03:55:37.822165 6251 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1011 03:55:37.822204 6251 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1011 03:55:37.822214 6251 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1011 03:55:37.822229 6251 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1011 03:55:37.822237 6251 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1011 03:55:37.822241 6251 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1011 03:55:37.822251 6251 factory.go:656] Stopping watch factory\\\\nI1011 03:55:37.822260 6251 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1011 03:55:37.822266 6251 ovnkube.go:599] Stopped ovnkube\\\\nI1011 03:55:37.822269 6251 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1011 03:55:37.822277 6251 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1011 03:55:37.822302 6251 handler.go:208] Removed 
*v1.Namespace event handler 5\\\\nI1011 03:55:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt
k6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.854439 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.854466 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.854473 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.854486 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.854495 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:39Z","lastTransitionTime":"2025-10-11T03:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.862139 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.874174 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.887187 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.899271 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.910118 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.919460 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.935421 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.947570 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/o
penshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:39Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.957141 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.957161 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.957170 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.957184 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:39 crc kubenswrapper[4798]: I1011 03:55:39.957192 4798 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:39Z","lastTransitionTime":"2025-10-11T03:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.059902 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.059946 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.059959 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.059977 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.059988 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:40Z","lastTransitionTime":"2025-10-11T03:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.162646 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.162962 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.163033 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.163115 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.163200 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:40Z","lastTransitionTime":"2025-10-11T03:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.217062 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:40 crc kubenswrapper[4798]: E1011 03:55:40.217198 4798 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 03:55:40 crc kubenswrapper[4798]: E1011 03:55:40.217653 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs podName:3cf2b185-9a26-4448-8fc5-4885f98daf87 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:41.21762688 +0000 UTC m=+36.553916576 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs") pod "network-metrics-daemon-5bfzt" (UID: "3cf2b185-9a26-4448-8fc5-4885f98daf87") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.265182 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.265232 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.265245 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.265264 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.265278 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:40Z","lastTransitionTime":"2025-10-11T03:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.368116 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.368172 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.368184 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.368203 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.368217 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:40Z","lastTransitionTime":"2025-10-11T03:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.471334 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.471687 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.471752 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.471813 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.471898 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:40Z","lastTransitionTime":"2025-10-11T03:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.573679 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.573725 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.573736 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.573769 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.573778 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:40Z","lastTransitionTime":"2025-10-11T03:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.676413 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.676451 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.676462 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.676476 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.676487 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:40Z","lastTransitionTime":"2025-10-11T03:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.779137 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.780494 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.780549 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.780569 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.780583 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:40Z","lastTransitionTime":"2025-10-11T03:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.883159 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.883195 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.883204 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.883219 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.883228 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:40Z","lastTransitionTime":"2025-10-11T03:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.985744 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.986123 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.986311 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.986715 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:40 crc kubenswrapper[4798]: I1011 03:55:40.986921 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:40Z","lastTransitionTime":"2025-10-11T03:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.090212 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.090246 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.090254 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.090268 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.090277 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.126856 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.126904 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.126942 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.126967 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.127082 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.127087 4798 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.127133 4798 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.127095 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.127167 4798 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.127171 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:57.127146992 +0000 UTC m=+52.463436708 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.127191 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:57.127182743 +0000 UTC m=+52.463472429 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.127202 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:57.127197664 +0000 UTC m=+52.463487350 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.127088 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.127231 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.127249 4798 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.127292 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:57.127279435 +0000 UTC m=+52.463569161 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.192205 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.192244 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.192255 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.192309 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.192321 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.228174 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.228328 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:55:57.228304122 +0000 UTC m=+52.564593808 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.228372 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.228510 4798 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.228605 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs podName:3cf2b185-9a26-4448-8fc5-4885f98daf87 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:43.228560678 +0000 UTC m=+38.564850434 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs") pod "network-metrics-daemon-5bfzt" (UID: "3cf2b185-9a26-4448-8fc5-4885f98daf87") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.295514 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.295591 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.295613 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.295636 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.295653 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.398164 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.398224 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.398243 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.398266 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.398284 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.423343 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.423434 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.423479 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.423494 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.423595 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.423652 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.423740 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.423827 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.500905 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.500951 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.500963 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.500979 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.500992 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.603345 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.603430 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.603453 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.603481 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.603505 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.653292 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.653367 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.653415 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.653441 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.653459 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.674368 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:41Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.678562 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.678616 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.678633 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.678654 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.678665 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.696059 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:41Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.700579 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.700651 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.700667 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.700687 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.700701 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.713957 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:41Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.718704 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.718763 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.718781 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.718805 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.718820 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.736080 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:41Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.739983 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.740029 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.740045 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.740064 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.740074 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.754009 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:41Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:41 crc kubenswrapper[4798]: E1011 03:55:41.754151 4798 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.755603 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.755624 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.755633 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.755646 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.755656 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.857759 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.857798 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.857810 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.857827 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.857838 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.960526 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.960570 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.960592 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.960613 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:41 crc kubenswrapper[4798]: I1011 03:55:41.960627 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:41Z","lastTransitionTime":"2025-10-11T03:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.064329 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.064369 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.064379 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.064433 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.064450 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:42Z","lastTransitionTime":"2025-10-11T03:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.167471 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.167548 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.167565 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.167591 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.167608 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:42Z","lastTransitionTime":"2025-10-11T03:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.270626 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.270685 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.270708 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.270736 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.270758 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:42Z","lastTransitionTime":"2025-10-11T03:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.373971 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.374078 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.374116 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.374154 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.374177 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:42Z","lastTransitionTime":"2025-10-11T03:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.477616 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.477719 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.477748 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.477785 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.477813 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:42Z","lastTransitionTime":"2025-10-11T03:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.581375 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.581481 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.581504 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.581532 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.581554 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:42Z","lastTransitionTime":"2025-10-11T03:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.684405 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.684447 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.684456 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.684470 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.684478 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:42Z","lastTransitionTime":"2025-10-11T03:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.788102 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.788183 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.788199 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.788221 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.788237 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:42Z","lastTransitionTime":"2025-10-11T03:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.891924 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.891988 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.892006 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.892045 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.892065 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:42Z","lastTransitionTime":"2025-10-11T03:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.995054 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.995116 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.995134 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.995153 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:42 crc kubenswrapper[4798]: I1011 03:55:42.995167 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:42Z","lastTransitionTime":"2025-10-11T03:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.098227 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.098273 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.098285 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.098302 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.098314 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:43Z","lastTransitionTime":"2025-10-11T03:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.201773 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.201855 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.202050 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.202081 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.202106 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:43Z","lastTransitionTime":"2025-10-11T03:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.254377 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:43 crc kubenswrapper[4798]: E1011 03:55:43.254665 4798 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 03:55:43 crc kubenswrapper[4798]: E1011 03:55:43.254769 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs podName:3cf2b185-9a26-4448-8fc5-4885f98daf87 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:47.254746976 +0000 UTC m=+42.591036672 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs") pod "network-metrics-daemon-5bfzt" (UID: "3cf2b185-9a26-4448-8fc5-4885f98daf87") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.304706 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.304779 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.304802 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.304834 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.304856 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:43Z","lastTransitionTime":"2025-10-11T03:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.407736 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.407784 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.407830 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.407848 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.407858 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:43Z","lastTransitionTime":"2025-10-11T03:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.423461 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.423465 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.423559 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:43 crc kubenswrapper[4798]: E1011 03:55:43.423740 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.430162 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:43 crc kubenswrapper[4798]: E1011 03:55:43.430374 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:43 crc kubenswrapper[4798]: E1011 03:55:43.430750 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:43 crc kubenswrapper[4798]: E1011 03:55:43.430756 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.510675 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.510717 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.510729 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.510745 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.510756 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:43Z","lastTransitionTime":"2025-10-11T03:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.613509 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.613545 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.613557 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.613574 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.613586 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:43Z","lastTransitionTime":"2025-10-11T03:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.716071 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.716133 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.716156 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.716186 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.716286 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:43Z","lastTransitionTime":"2025-10-11T03:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.819362 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.819406 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.819415 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.819427 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.819436 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:43Z","lastTransitionTime":"2025-10-11T03:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.923229 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.923300 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.923323 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.923354 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:43 crc kubenswrapper[4798]: I1011 03:55:43.923376 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:43Z","lastTransitionTime":"2025-10-11T03:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.026776 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.026843 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.026860 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.026883 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.026903 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:44Z","lastTransitionTime":"2025-10-11T03:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.129358 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.129403 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.129413 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.129425 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.129433 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:44Z","lastTransitionTime":"2025-10-11T03:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.232131 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.232183 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.232198 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.232218 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.232232 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:44Z","lastTransitionTime":"2025-10-11T03:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.335453 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.335491 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.335502 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.335517 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.335528 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:44Z","lastTransitionTime":"2025-10-11T03:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.437713 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.437989 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.438111 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.438194 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.438276 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:44Z","lastTransitionTime":"2025-10-11T03:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.540436 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.540481 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.540500 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.540516 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.540526 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:44Z","lastTransitionTime":"2025-10-11T03:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.642885 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.642922 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.642933 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.642951 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.642964 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:44Z","lastTransitionTime":"2025-10-11T03:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.745130 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.745186 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.745223 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.745240 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.745250 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:44Z","lastTransitionTime":"2025-10-11T03:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.847773 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.847813 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.847824 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.847838 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.847849 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:44Z","lastTransitionTime":"2025-10-11T03:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.949876 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.949926 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.949939 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.949956 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:44 crc kubenswrapper[4798]: I1011 03:55:44.949980 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:44Z","lastTransitionTime":"2025-10-11T03:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.052499 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.052774 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.052901 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.053000 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.053082 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:45Z","lastTransitionTime":"2025-10-11T03:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.155339 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.155386 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.155451 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.155470 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.155480 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:45Z","lastTransitionTime":"2025-10-11T03:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.258220 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.258285 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.258297 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.258323 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.258335 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:45Z","lastTransitionTime":"2025-10-11T03:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.361486 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.361804 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.361935 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.362070 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.362236 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:45Z","lastTransitionTime":"2025-10-11T03:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.423366 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.423414 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.423556 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:45 crc kubenswrapper[4798]: E1011 03:55:45.423709 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.423821 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:45 crc kubenswrapper[4798]: E1011 03:55:45.423846 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:55:45 crc kubenswrapper[4798]: E1011 03:55:45.423990 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:45 crc kubenswrapper[4798]: E1011 03:55:45.424108 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.444249 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.455827 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.464812 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.464846 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.464855 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.464867 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.464878 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:45Z","lastTransitionTime":"2025-10-11T03:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.470154 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.487802 4798 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"message\\\":\\\"gressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1011 03:55:37.822041 6251 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1011 03:55:37.822100 6251 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1011 03:55:37.822166 6251 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1011 03:55:37.822165 6251 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1011 03:55:37.822204 6251 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1011 03:55:37.822214 6251 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1011 03:55:37.822229 6251 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1011 03:55:37.822237 6251 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1011 03:55:37.822241 6251 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1011 03:55:37.822251 6251 factory.go:656] Stopping watch factory\\\\nI1011 03:55:37.822260 6251 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1011 03:55:37.822266 6251 ovnkube.go:599] Stopped ovnkube\\\\nI1011 03:55:37.822269 6251 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1011 03:55:37.822277 6251 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1011 03:55:37.822302 6251 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1011 03:55:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.502937 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.518245 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.530312 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.543686 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.557701 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.566934 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.566973 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.566982 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.566997 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.567007 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:45Z","lastTransitionTime":"2025-10-11T03:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.570206 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.580331 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.591282 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.602684 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.619065 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.632756 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.643277 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:45Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.669134 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.669184 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.669198 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.669217 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.669230 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:45Z","lastTransitionTime":"2025-10-11T03:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.773268 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.773346 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.773371 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.773443 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:45 crc kubenswrapper[4798]: I1011 03:55:45.773489 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:45Z","lastTransitionTime":"2025-10-11T03:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 11 03:55:47 crc kubenswrapper[4798]: I1011 03:55:47.293144 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt"
Oct 11 03:55:47 crc kubenswrapper[4798]: E1011 03:55:47.293340 4798 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 11 03:55:47 crc kubenswrapper[4798]: E1011 03:55:47.293476 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs podName:3cf2b185-9a26-4448-8fc5-4885f98daf87 nodeName:}" failed. No retries permitted until 2025-10-11 03:55:55.29345259 +0000 UTC m=+50.629742306 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs") pod "network-metrics-daemon-5bfzt" (UID: "3cf2b185-9a26-4448-8fc5-4885f98daf87") : object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 11 03:55:47 crc kubenswrapper[4798]: I1011 03:55:47.423756 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 03:55:47 crc kubenswrapper[4798]: I1011 03:55:47.423829 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 03:55:47 crc kubenswrapper[4798]: I1011 03:55:47.423775 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt"
Oct 11 03:55:47 crc kubenswrapper[4798]: I1011 03:55:47.423975 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 03:55:47 crc kubenswrapper[4798]: E1011 03:55:47.424095 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87"
Oct 11 03:55:47 crc kubenswrapper[4798]: E1011 03:55:47.424213 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 03:55:47 crc kubenswrapper[4798]: E1011 03:55:47.423964 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 03:55:47 crc kubenswrapper[4798]: E1011 03:55:47.424434 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 03:55:47 crc kubenswrapper[4798]: I1011 03:55:47.529881 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:47 crc kubenswrapper[4798]: I1011 03:55:47.529945 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:47 crc kubenswrapper[4798]: I1011 03:55:47.529967 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:47 crc kubenswrapper[4798]: I1011 03:55:47.529997 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:47 crc kubenswrapper[4798]: I1011 03:55:47.530020 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:47Z","lastTransitionTime":"2025-10-11T03:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:49 crc kubenswrapper[4798]: I1011 03:55:49.422819 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 03:55:49 crc kubenswrapper[4798]: I1011 03:55:49.422929 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 03:55:49 crc kubenswrapper[4798]: I1011 03:55:49.422923 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 03:55:49 crc kubenswrapper[4798]: I1011 03:55:49.422883 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt"
Oct 11 03:55:49 crc kubenswrapper[4798]: E1011 03:55:49.423056 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 03:55:49 crc kubenswrapper[4798]: E1011 03:55:49.423120 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 03:55:49 crc kubenswrapper[4798]: E1011 03:55:49.423199 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87"
Oct 11 03:55:49 crc kubenswrapper[4798]: E1011 03:55:49.423251 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 03:55:49 crc kubenswrapper[4798]: I1011 03:55:49.914553 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:49 crc kubenswrapper[4798]: I1011 03:55:49.914622 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:49 crc kubenswrapper[4798]: I1011 03:55:49.914643 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:49 crc kubenswrapper[4798]: I1011 03:55:49.914671 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:49 crc kubenswrapper[4798]: I1011 03:55:49.914692 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:49Z","lastTransitionTime":"2025-10-11T03:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.019100 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.019190 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.019206 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.019232 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.019247 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:50Z","lastTransitionTime":"2025-10-11T03:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.122569 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.122624 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.122644 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.122670 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.122689 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:50Z","lastTransitionTime":"2025-10-11T03:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.226664 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.226754 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.226773 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.226806 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.226828 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:50Z","lastTransitionTime":"2025-10-11T03:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.330858 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.330921 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.330941 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.330967 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.330987 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:50Z","lastTransitionTime":"2025-10-11T03:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.434559 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.434607 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.434621 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.434644 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.434659 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:50Z","lastTransitionTime":"2025-10-11T03:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.538202 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.538280 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.538302 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.538336 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.538358 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:50Z","lastTransitionTime":"2025-10-11T03:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.642921 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.643001 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.643025 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.643062 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.643086 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:50Z","lastTransitionTime":"2025-10-11T03:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.765313 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.765379 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.765426 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.765454 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.765467 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:50Z","lastTransitionTime":"2025-10-11T03:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.868933 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.869000 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.869014 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.869034 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.869049 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:50Z","lastTransitionTime":"2025-10-11T03:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.971771 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.971848 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.971866 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.971894 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:50 crc kubenswrapper[4798]: I1011 03:55:50.971914 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:50Z","lastTransitionTime":"2025-10-11T03:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.074568 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.074635 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.074649 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.074678 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.074697 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.178078 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.178134 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.178146 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.178167 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.178182 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.281542 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.281607 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.281622 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.281645 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.281661 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.385515 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.385570 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.385583 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.385629 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.385643 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.423328 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.423538 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.423553 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.423748 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:51 crc kubenswrapper[4798]: E1011 03:55:51.423744 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:51 crc kubenswrapper[4798]: E1011 03:55:51.424188 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:51 crc kubenswrapper[4798]: E1011 03:55:51.424263 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:55:51 crc kubenswrapper[4798]: E1011 03:55:51.424497 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.424734 4798 scope.go:117] "RemoveContainer" containerID="0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.489143 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.489202 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.489215 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.489238 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.489256 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.592400 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.592439 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.592449 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.592466 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.592478 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.695643 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.695702 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.695717 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.695738 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.695780 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.781619 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/1.log" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.785467 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerStarted","Data":"a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.786415 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.799926 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.799973 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.799983 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.799998 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.800009 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.807542 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.821463 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.835667 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.847094 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.853506 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.853546 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.853557 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.853577 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.853589 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.870133 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:51 crc kubenswrapper[4798]: E1011 03:55:51.870812 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.874367 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.874420 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.874453 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.874470 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.874478 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.891615 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:51 crc kubenswrapper[4798]: E1011 03:55:51.894794 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... node-status patch payload omitted: byte-identical to the preceding node-status entry (conditions, allocatable/capacity, image list, nodeInfo) ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.898612 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.898659 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.898670 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.898686 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.898699 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.905022 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:51 crc kubenswrapper[4798]: E1011 03:55:51.915793 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.922192 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.922230 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.922243 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.922261 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.922273 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.932116 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":
\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10
a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernet
es.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:51 crc kubenswrapper[4798]: E1011 03:55:51.934868 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... node-status patch payload omitted: byte-identical to the preceding node-status entry (conditions, allocatable/capacity, image list, nodeInfo) ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z"
Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.950251 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.950294 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.950303 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.950318 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.950334 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.955224 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67
776e6b11ebdc9a41d8b20baf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"message\\\":\\\"gressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1011 03:55:37.822041 6251 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1011 03:55:37.822100 6251 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1011 03:55:37.822166 6251 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1011 03:55:37.822165 6251 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1011 03:55:37.822204 6251 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1011 03:55:37.822214 6251 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1011 03:55:37.822229 6251 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1011 03:55:37.822237 6251 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1011 03:55:37.822241 6251 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1011 03:55:37.822251 6251 factory.go:656] Stopping watch factory\\\\nI1011 03:55:37.822260 6251 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1011 03:55:37.822266 6251 ovnkube.go:599] Stopped ovnkube\\\\nI1011 03:55:37.822269 6251 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1011 03:55:37.822277 6251 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1011 03:55:37.822302 6251 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1011 
03:55:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:51 crc kubenswrapper[4798]: E1011 03:55:51.964230 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:51 crc kubenswrapper[4798]: E1011 03:55:51.964381 4798 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.966161 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.966200 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.966214 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.966230 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.966242 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:51Z","lastTransitionTime":"2025-10-11T03:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.971260 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.987750 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt
\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:51 crc kubenswrapper[4798]: I1011 03:55:51.999505 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:51Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.011078 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.021794 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.030475 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.041118 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.068880 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.068919 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.068930 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.068946 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.068958 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:52Z","lastTransitionTime":"2025-10-11T03:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.171086 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.171121 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.171131 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.171144 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.171154 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:52Z","lastTransitionTime":"2025-10-11T03:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.272822 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.272860 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.272872 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.272887 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.272900 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:52Z","lastTransitionTime":"2025-10-11T03:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.374971 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.375006 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.375016 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.375031 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.375041 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:52Z","lastTransitionTime":"2025-10-11T03:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.477801 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.477875 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.477893 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.477915 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.477932 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:52Z","lastTransitionTime":"2025-10-11T03:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.580202 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.580276 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.580299 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.580328 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.580350 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:52Z","lastTransitionTime":"2025-10-11T03:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.683036 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.683087 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.683099 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.683114 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.683126 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:52Z","lastTransitionTime":"2025-10-11T03:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.785754 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.785826 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.785849 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.785878 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.785900 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:52Z","lastTransitionTime":"2025-10-11T03:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.789940 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/2.log"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.790835 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/1.log"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.794002 4798 generic.go:334] "Generic (PLEG): container finished" podID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerID="a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf" exitCode=1
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.794055 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf"}
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.794137 4798 scope.go:117] "RemoveContainer" containerID="0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.795148 4798 scope.go:117] "RemoveContainer" containerID="a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf"
Oct 11 03:55:52 crc kubenswrapper[4798]: E1011 03:55:52.795467 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af"
Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.816717 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.834293 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.849950 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.863145 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.877110 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.888252 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.888316 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.888331 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.888352 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 
03:55:52.888369 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:52Z","lastTransitionTime":"2025-10-11T03:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.896012 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ov
nkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"message\\\":\\\"gressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1011 03:55:37.822041 6251 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1011 03:55:37.822100 6251 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1011 03:55:37.822166 6251 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1011 03:55:37.822165 6251 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1011 03:55:37.822204 6251 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1011 03:55:37.822214 6251 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1011 03:55:37.822229 6251 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1011 03:55:37.822237 6251 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1011 03:55:37.822241 6251 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1011 03:55:37.822251 6251 factory.go:656] Stopping watch factory\\\\nI1011 03:55:37.822260 6251 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1011 03:55:37.822266 6251 ovnkube.go:599] Stopped ovnkube\\\\nI1011 03:55:37.822269 6251 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1011 03:55:37.822277 6251 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1011 03:55:37.822302 
6251 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1011 03:55:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:52Z\\\",\\\"message\\\":\\\"UUIDName:}]\\\\nI1011 03:55:52.357804 6446 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:73135118-cf1b-4568-bd31-2f50308bf69d}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 03:55:52.356105 6446 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357905 6446 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357951 6446 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1011 03:55:52.357979 6446 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1011 03:55:52.358011 6446 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.358029 6446 services_controller.go:356] Processing sync for service openshift-ingress-operator/metrics for network=default\\\\nF1011 03:55:52.357999 6446 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.907199 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.919169 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.931210 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.944935 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.966885 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.980165 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.990760 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.990798 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.990809 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.990824 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.990833 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:52Z","lastTransitionTime":"2025-10-11T03:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:52 crc kubenswrapper[4798]: I1011 03:55:52.993238 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:52Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.011975 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.027538 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.040277 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.093690 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.093738 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.093749 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.093765 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.093777 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:53Z","lastTransitionTime":"2025-10-11T03:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.196434 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.196462 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.196470 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.196482 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.196491 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:53Z","lastTransitionTime":"2025-10-11T03:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.299156 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.299486 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.299591 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.299740 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.299848 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:53Z","lastTransitionTime":"2025-10-11T03:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.402257 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.402293 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.402304 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.402321 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.402330 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:53Z","lastTransitionTime":"2025-10-11T03:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.423129 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:53 crc kubenswrapper[4798]: E1011 03:55:53.423467 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.423676 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:53 crc kubenswrapper[4798]: E1011 03:55:53.423831 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.423676 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:53 crc kubenswrapper[4798]: E1011 03:55:53.423945 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.423988 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:53 crc kubenswrapper[4798]: E1011 03:55:53.424053 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.505308 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.505373 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.505426 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.505459 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.505477 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:53Z","lastTransitionTime":"2025-10-11T03:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.610622 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.610663 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.610673 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.610688 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.610699 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:53Z","lastTransitionTime":"2025-10-11T03:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.690219 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.698376 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.708790 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.714975 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.715050 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.715067 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.715089 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.715125 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:53Z","lastTransitionTime":"2025-10-11T03:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.723660 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.735745 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.748348 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.756903 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.767842 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.784357 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.795532 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.797990 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/2.log" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.801631 4798 scope.go:117] "RemoveContainer" containerID="a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf" Oct 11 03:55:53 crc kubenswrapper[4798]: E1011 03:55:53.801853 4798 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.811680 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.816794 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.816842 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.816856 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.816869 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.816882 4798 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:53Z","lastTransitionTime":"2025-10-11T03:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.829978 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube
rnetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.843602 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11
T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.855701 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.866137 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.878969 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.896333 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67
776e6b11ebdc9a41d8b20baf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c1c7c6aa877690a8caf39779fec8180efdbf7169264c931008c3f639d938242\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"message\\\":\\\"gressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1011 03:55:37.822041 6251 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1011 03:55:37.822100 6251 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1011 03:55:37.822166 6251 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1011 03:55:37.822165 6251 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1011 03:55:37.822204 6251 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1011 03:55:37.822214 6251 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1011 03:55:37.822229 6251 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1011 03:55:37.822237 6251 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1011 03:55:37.822241 6251 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1011 03:55:37.822251 6251 factory.go:656] Stopping watch factory\\\\nI1011 03:55:37.822260 6251 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1011 03:55:37.822266 6251 ovnkube.go:599] Stopped ovnkube\\\\nI1011 03:55:37.822269 6251 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1011 03:55:37.822277 6251 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1011 03:55:37.822302 6251 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1011 03:55:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:52Z\\\",\\\"message\\\":\\\"UUIDName:}]\\\\nI1011 03:55:52.357804 6446 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:73135118-cf1b-4568-bd31-2f50308bf69d}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 03:55:52.356105 6446 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357905 6446 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357951 6446 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1011 03:55:52.357979 6446 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1011 03:55:52.358011 6446 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.358029 6446 services_controller.go:356] Processing sync for service 
openshift-ingress-operator/metrics for network=default\\\\nF1011 03:55:52.357999 6446 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"
initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.905567 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.918558 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.918639 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.918653 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.918670 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.918680 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:53Z","lastTransitionTime":"2025-10-11T03:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.921077 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.932278 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.943070 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.953798 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.965120 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:53 crc kubenswrapper[4798]: I1011 03:55:53.978639 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.000777 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67
776e6b11ebdc9a41d8b20baf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:52Z\\\",\\\"message\\\":\\\"UUIDName:}]\\\\nI1011 03:55:52.357804 6446 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:73135118-cf1b-4568-bd31-2f50308bf69d}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 03:55:52.356105 6446 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357905 6446 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357951 6446 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1011 03:55:52.357979 6446 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1011 03:55:52.358011 6446 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.358029 6446 services_controller.go:356] Processing sync for service openshift-ingress-operator/metrics for network=default\\\\nF1011 03:55:52.357999 6446 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:53Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.012869 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:54Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.020872 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.020923 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.020933 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.020945 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.020953 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:54Z","lastTransitionTime":"2025-10-11T03:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.030002 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:54Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.041327 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:54Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.051733 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-11T03:55:54Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.062840 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:54Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.072206 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:54Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.083506 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:54Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.099002 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9be402e-40bb-4e2d-be8b-349646c34f1a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6897a04b7204031c861d74f6a4d475af9e9b88220cf02bfa50e7845c85cd5f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b88dcbb90798e97c13f7e7ef7bc7171bb1dc33cf488bd453a8d263a02093dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a938
0066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2fb58514dfb39402d4e10ca699975671a3c53a7fe54b374a987efa8ba401ccf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:54Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.111665 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:54Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.121445 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:54Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.122766 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.122811 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.122821 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.122836 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.122845 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:54Z","lastTransitionTime":"2025-10-11T03:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.225799 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.226619 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.226662 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.226696 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.226721 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:54Z","lastTransitionTime":"2025-10-11T03:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.330110 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.330181 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.330193 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.330214 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.330227 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:54Z","lastTransitionTime":"2025-10-11T03:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.433137 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.433180 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.433189 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.433203 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.433212 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:54Z","lastTransitionTime":"2025-10-11T03:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.535468 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.535515 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.535527 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.535544 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.535556 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:54Z","lastTransitionTime":"2025-10-11T03:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.638352 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.638448 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.638462 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.638492 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.638509 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:54Z","lastTransitionTime":"2025-10-11T03:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.740673 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.740710 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.740719 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.740733 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.740741 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:54Z","lastTransitionTime":"2025-10-11T03:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.842863 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.842919 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.842932 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.842949 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.842962 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:54Z","lastTransitionTime":"2025-10-11T03:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.945878 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.945918 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.945930 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.945948 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:54 crc kubenswrapper[4798]: I1011 03:55:54.945959 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:54Z","lastTransitionTime":"2025-10-11T03:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.047934 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.047976 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.047987 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.048002 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.048013 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:55Z","lastTransitionTime":"2025-10-11T03:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.151664 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.151718 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.151730 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.151750 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.151762 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:55Z","lastTransitionTime":"2025-10-11T03:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.254158 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.254226 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.254248 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.254278 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.254303 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:55Z","lastTransitionTime":"2025-10-11T03:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.310924 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:55 crc kubenswrapper[4798]: E1011 03:55:55.311148 4798 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 03:55:55 crc kubenswrapper[4798]: E1011 03:55:55.311248 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs podName:3cf2b185-9a26-4448-8fc5-4885f98daf87 nodeName:}" failed. No retries permitted until 2025-10-11 03:56:11.311228014 +0000 UTC m=+66.647517700 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs") pod "network-metrics-daemon-5bfzt" (UID: "3cf2b185-9a26-4448-8fc5-4885f98daf87") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.357172 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.357205 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.357214 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.357227 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.357237 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:55Z","lastTransitionTime":"2025-10-11T03:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.422722 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.422747 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.422796 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:55 crc kubenswrapper[4798]: E1011 03:55:55.422866 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.422901 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:55 crc kubenswrapper[4798]: E1011 03:55:55.422920 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:55:55 crc kubenswrapper[4798]: E1011 03:55:55.423013 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:55 crc kubenswrapper[4798]: E1011 03:55:55.423105 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.441438 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9be402e-40bb-4e2d-be8b-349646c34f1a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6897a04b7204031c861d74f6a4d475af9e9b88220cf02bfa50e7845c85cd5f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b88dcbb90798e97c13f7e7ef7bc7171bb1dc33cf488bd453a8d263a02093dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2fb58514dfb39402d4e10ca699975671a3c53a7fe54b374a987efa8ba401ccf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.459815 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.459864 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.459877 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.459896 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.459908 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:55Z","lastTransitionTime":"2025-10-11T03:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.461834 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.474700 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.487246 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 
03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.501900 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.519214 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.530610 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.547015 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.562304 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.562351 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.562361 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.562377 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.562386 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:55Z","lastTransitionTime":"2025-10-11T03:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.563321 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.578085 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 
2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.599060 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d
d63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-rel
ease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:52Z\\\",\\\"message\\\":\\\"UUIDName:}]\\\\nI1011 03:55:52.357804 6446 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:73135118-cf1b-4568-bd31-2f50308bf69d}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 03:55:52.356105 6446 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357905 6446 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357951 6446 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1011 03:55:52.357979 6446 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1011 03:55:52.358011 6446 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.358029 6446 services_controller.go:356] Processing sync for service openshift-ingress-operator/metrics for network=default\\\\nF1011 03:55:52.357999 6446 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.608189 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.618930 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.630674 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.645221 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.659827 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.666065 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.666116 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.666139 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.666166 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.666186 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:55Z","lastTransitionTime":"2025-10-11T03:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.673700 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:55:55Z is after 2025-08-24T17:21:41Z" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.768365 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.768425 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.768435 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.768453 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.768464 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:55Z","lastTransitionTime":"2025-10-11T03:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.870060 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.870093 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.870103 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.870116 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.870126 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:55Z","lastTransitionTime":"2025-10-11T03:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.972097 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.972139 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.972151 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.972165 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:55 crc kubenswrapper[4798]: I1011 03:55:55.972175 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:55Z","lastTransitionTime":"2025-10-11T03:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.073934 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.073980 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.073993 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.074011 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.074022 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:56Z","lastTransitionTime":"2025-10-11T03:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.176796 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.176837 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.176847 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.176861 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.176870 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:56Z","lastTransitionTime":"2025-10-11T03:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.280332 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.280375 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.280406 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.280425 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.280435 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:56Z","lastTransitionTime":"2025-10-11T03:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.382999 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.383040 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.383051 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.383069 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.383081 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:56Z","lastTransitionTime":"2025-10-11T03:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.485806 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.485844 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.485859 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.485877 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.485890 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:56Z","lastTransitionTime":"2025-10-11T03:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.588868 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.588952 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.588974 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.589005 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.589026 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:56Z","lastTransitionTime":"2025-10-11T03:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.690968 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.691022 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.691035 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.691053 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.691064 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:56Z","lastTransitionTime":"2025-10-11T03:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.794613 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.794670 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.794682 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.794703 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.794715 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:56Z","lastTransitionTime":"2025-10-11T03:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.897882 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.897926 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.897937 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.897953 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:56 crc kubenswrapper[4798]: I1011 03:55:56.897964 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:56Z","lastTransitionTime":"2025-10-11T03:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.001013 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.001081 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.001102 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.001136 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.001153 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:57Z","lastTransitionTime":"2025-10-11T03:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.104614 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.104691 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.104712 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.104745 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.104766 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:57Z","lastTransitionTime":"2025-10-11T03:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.131679 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.131738 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.131757 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.131789 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.131889 4798 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.131945 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:56:29.131929377 +0000 UTC m=+84.468219063 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.131999 4798 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.132021 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.132137 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.132148 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.132178 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:56:29.132118342 +0000 UTC m=+84.468408118 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.132197 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.132226 4798 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.132297 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 03:56:29.132274515 +0000 UTC m=+84.468564381 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.132167 4798 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.132359 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 03:56:29.132350868 +0000 UTC m=+84.468640554 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.207862 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.207907 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.207917 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.207933 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.207945 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:57Z","lastTransitionTime":"2025-10-11T03:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.232783 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.232990 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-10-11 03:56:29.232960794 +0000 UTC m=+84.569250490 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.311536 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.311630 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.311651 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.311687 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.311714 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:57Z","lastTransitionTime":"2025-10-11T03:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.414981 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.415053 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.415072 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.415103 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.415128 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:57Z","lastTransitionTime":"2025-10-11T03:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.423371 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.423437 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.423465 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.423600 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.423658 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.423783 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.423849 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:55:57 crc kubenswrapper[4798]: E1011 03:55:57.423939 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.518081 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.518152 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.518169 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.518196 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.518213 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:57Z","lastTransitionTime":"2025-10-11T03:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.620976 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.621036 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.621051 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.621072 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.621088 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:57Z","lastTransitionTime":"2025-10-11T03:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.724795 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.724880 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.724906 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.724948 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.724975 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:57Z","lastTransitionTime":"2025-10-11T03:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.830006 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.830108 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.830128 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.830159 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.830180 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:57Z","lastTransitionTime":"2025-10-11T03:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.933199 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.933248 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.933257 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.933273 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:57 crc kubenswrapper[4798]: I1011 03:55:57.933282 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:57Z","lastTransitionTime":"2025-10-11T03:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.036762 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.036839 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.036859 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.036892 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.036912 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:58Z","lastTransitionTime":"2025-10-11T03:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.140158 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.140306 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.140325 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.140351 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.140369 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:58Z","lastTransitionTime":"2025-10-11T03:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.243457 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.243533 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.243552 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.243581 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.243599 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:58Z","lastTransitionTime":"2025-10-11T03:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.347007 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.347062 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.347074 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.347098 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.347139 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:58Z","lastTransitionTime":"2025-10-11T03:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.450356 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.450672 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.450699 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.450733 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.450758 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:58Z","lastTransitionTime":"2025-10-11T03:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.553083 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.553120 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.553130 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.553148 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.553158 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:58Z","lastTransitionTime":"2025-10-11T03:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.655870 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.655956 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.655970 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.655986 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.655998 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:58Z","lastTransitionTime":"2025-10-11T03:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.758692 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.758749 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.758761 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.758779 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.758831 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:58Z","lastTransitionTime":"2025-10-11T03:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.861805 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.861858 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.861871 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.861890 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.861902 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:58Z","lastTransitionTime":"2025-10-11T03:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.964459 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.964526 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.964542 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.964563 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:58 crc kubenswrapper[4798]: I1011 03:55:58.964585 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:58Z","lastTransitionTime":"2025-10-11T03:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.067303 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.067369 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.067384 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.067435 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.067450 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:59Z","lastTransitionTime":"2025-10-11T03:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.171523 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.171563 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.171573 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.171588 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.171597 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:59Z","lastTransitionTime":"2025-10-11T03:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.273903 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.273975 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.273994 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.274021 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.274040 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:59Z","lastTransitionTime":"2025-10-11T03:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.376768 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.376843 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.376867 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.376895 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.376915 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:59Z","lastTransitionTime":"2025-10-11T03:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.422622 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.422699 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:55:59 crc kubenswrapper[4798]: E1011 03:55:59.422796 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:55:59 crc kubenswrapper[4798]: E1011 03:55:59.422922 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.422638 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:55:59 crc kubenswrapper[4798]: E1011 03:55:59.423044 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.423523 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:55:59 crc kubenswrapper[4798]: E1011 03:55:59.423688 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.479610 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.479688 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.479711 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.479741 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.479763 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:59Z","lastTransitionTime":"2025-10-11T03:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.582833 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.582903 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.582917 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.582935 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.582946 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:59Z","lastTransitionTime":"2025-10-11T03:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.685455 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.685493 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.685505 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.685520 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.685532 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:59Z","lastTransitionTime":"2025-10-11T03:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.788863 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.788930 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.788952 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.788983 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.789006 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:59Z","lastTransitionTime":"2025-10-11T03:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.891675 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.891729 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.891745 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.891763 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.891776 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:59Z","lastTransitionTime":"2025-10-11T03:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.993858 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.993892 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.993901 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.993916 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:55:59 crc kubenswrapper[4798]: I1011 03:55:59.993926 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:55:59Z","lastTransitionTime":"2025-10-11T03:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.096015 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.096056 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.096072 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.096088 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.096099 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:00Z","lastTransitionTime":"2025-10-11T03:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.199143 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.199192 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.199204 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.199220 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.199228 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:00Z","lastTransitionTime":"2025-10-11T03:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.301667 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.301744 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.301763 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.301786 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.301803 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:00Z","lastTransitionTime":"2025-10-11T03:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.404804 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.404842 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.404852 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.404871 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.404887 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:00Z","lastTransitionTime":"2025-10-11T03:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.508161 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.508229 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.508245 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.508270 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.508287 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:00Z","lastTransitionTime":"2025-10-11T03:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.611587 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.611629 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.611638 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.611652 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.611662 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:00Z","lastTransitionTime":"2025-10-11T03:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.714509 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.714571 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.714588 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.714613 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.714630 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:00Z","lastTransitionTime":"2025-10-11T03:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.818224 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.818286 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.818305 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.818327 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.818345 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:00Z","lastTransitionTime":"2025-10-11T03:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.920810 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.920850 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.920858 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.920870 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:00 crc kubenswrapper[4798]: I1011 03:56:00.920879 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:00Z","lastTransitionTime":"2025-10-11T03:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.023763 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.023817 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.023835 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.023858 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.023873 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:01Z","lastTransitionTime":"2025-10-11T03:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.126269 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.126326 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.126342 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.126360 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.126373 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:01Z","lastTransitionTime":"2025-10-11T03:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.228860 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.228907 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.228916 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.228930 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.228939 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:01Z","lastTransitionTime":"2025-10-11T03:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.331553 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.331597 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.331606 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.331621 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.331631 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:01Z","lastTransitionTime":"2025-10-11T03:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.423000 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.423055 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.423077 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:01 crc kubenswrapper[4798]: E1011 03:56:01.423211 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.423225 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:01 crc kubenswrapper[4798]: E1011 03:56:01.423369 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:01 crc kubenswrapper[4798]: E1011 03:56:01.423424 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:01 crc kubenswrapper[4798]: E1011 03:56:01.423525 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.433843 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.433875 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.433885 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.433902 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.433914 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:01Z","lastTransitionTime":"2025-10-11T03:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.538030 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.538076 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.538087 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.538101 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.538109 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:01Z","lastTransitionTime":"2025-10-11T03:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.646310 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.646354 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.646365 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.646382 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.646417 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:01Z","lastTransitionTime":"2025-10-11T03:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.748873 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.749210 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.749289 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.749353 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.749454 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:01Z","lastTransitionTime":"2025-10-11T03:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.852379 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.852436 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.852450 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.852465 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.852479 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:01Z","lastTransitionTime":"2025-10-11T03:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.955118 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.955161 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.955173 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.955197 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:01 crc kubenswrapper[4798]: I1011 03:56:01.955210 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:01Z","lastTransitionTime":"2025-10-11T03:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.058204 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.058257 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.058269 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.058287 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.058302 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.160568 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.160651 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.160663 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.160676 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.160685 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.263016 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.263065 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.263077 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.263094 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.263105 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.318818 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.318861 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.318871 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.318886 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.318898 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: E1011 03:56:02.335654 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:02Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.339476 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.339516 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.339527 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.339541 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.339552 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: E1011 03:56:02.353300 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:02Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.357119 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.357146 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.357156 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.357172 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.357184 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: E1011 03:56:02.394720 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:02Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.399144 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.399195 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.399208 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.399227 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.399240 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: E1011 03:56:02.416569 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:02Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.421302 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.421362 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.421379 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.421438 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.421467 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: E1011 03:56:02.439233 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:02Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:02 crc kubenswrapper[4798]: E1011 03:56:02.439521 4798 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.442044 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.442081 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.442092 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.442107 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.442119 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.545184 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.545234 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.545251 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.545271 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.545285 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.647429 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.647740 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.647815 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.647905 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.647976 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.750729 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.750769 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.750779 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.750794 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.750806 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.852984 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.853038 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.853052 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.853071 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.853088 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.955153 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.955455 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.955553 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.955646 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:02 crc kubenswrapper[4798]: I1011 03:56:02.955721 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.057662 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.057710 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.057722 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.057743 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.057760 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:03Z","lastTransitionTime":"2025-10-11T03:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.160601 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.160980 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.161294 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.161544 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.161810 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:03Z","lastTransitionTime":"2025-10-11T03:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.265080 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.265208 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.265235 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.265277 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.265300 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:03Z","lastTransitionTime":"2025-10-11T03:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
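Each setters.go:603 entry above carries the node's full Ready condition as inline JSON, so the reason and message can be pulled straight out of the log text. A minimal, self-contained sketch (not kubelet code; the struct fields simply mirror the keys visible in these entries) that decodes one such condition={...} payload:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// nodeCondition mirrors the JSON keys seen in the "Node became not ready"
// entries above; it is an illustration, not a real kubelet type.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload copied verbatim from one of the log entries above.
	payload := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:02Z","lastTransitionTime":"2025-10-11T03:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`

	var c nodeCondition
	if err := json.Unmarshal([]byte(payload), &c); err != nil {
		log.Fatal(err)
	}
	// Prints: Ready=False reason=KubeletNotReady
	fmt.Printf("%s=%s reason=%s\n", c.Type, c.Status, c.Reason)
}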
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.369561 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.369623 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.369636 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.369653 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.369668 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:03Z","lastTransitionTime":"2025-10-11T03:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.423414 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.423510 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 03:56:03 crc kubenswrapper[4798]: E1011 03:56:03.423541 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.423573 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 03:56:03 crc kubenswrapper[4798]: E1011 03:56:03.423653 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.423691 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 03:56:03 crc kubenswrapper[4798]: E1011 03:56:03.423719 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 03:56:03 crc kubenswrapper[4798]: E1011 03:56:03.423846 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.472030 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.472069 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.472077 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.472092 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.472101 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:03Z","lastTransitionTime":"2025-10-11T03:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.590483 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.590556 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.590569 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.590586 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.590598 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:03Z","lastTransitionTime":"2025-10-11T03:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.693102 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.693165 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.693179 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.693194 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.693204 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:03Z","lastTransitionTime":"2025-10-11T03:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.797350 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.797454 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.797505 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.797540 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.797565 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:03Z","lastTransitionTime":"2025-10-11T03:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.900381 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.900455 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.900465 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.900479 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:03 crc kubenswrapper[4798]: I1011 03:56:03.900487 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:03Z","lastTransitionTime":"2025-10-11T03:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.003997 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.004078 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.004098 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.004149 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.004167 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:04Z","lastTransitionTime":"2025-10-11T03:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.106800 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.106871 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.106897 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.106925 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.106948 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:04Z","lastTransitionTime":"2025-10-11T03:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.209540 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.209598 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.209619 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.209641 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.209657 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:04Z","lastTransitionTime":"2025-10-11T03:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.312444 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.312479 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.312490 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.312506 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.312517 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:04Z","lastTransitionTime":"2025-10-11T03:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.414586 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.414844 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.414918 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.415033 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.415152 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:04Z","lastTransitionTime":"2025-10-11T03:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.517287 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.517348 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.517361 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.517415 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.517431 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:04Z","lastTransitionTime":"2025-10-11T03:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.619730 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.619777 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.619792 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.619813 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.619831 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:04Z","lastTransitionTime":"2025-10-11T03:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.723115 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.723162 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.723178 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.723200 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.723216 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:04Z","lastTransitionTime":"2025-10-11T03:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.825732 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.825801 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.825818 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.825841 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.825855 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:04Z","lastTransitionTime":"2025-10-11T03:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.928661 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.928696 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.928706 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.928718 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:04 crc kubenswrapper[4798]: I1011 03:56:04.928728 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:04Z","lastTransitionTime":"2025-10-11T03:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.032098 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.032154 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.032169 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.032191 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.032208 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:05Z","lastTransitionTime":"2025-10-11T03:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.134280 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.134325 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.134337 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.134354 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.134378 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:05Z","lastTransitionTime":"2025-10-11T03:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.237012 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.237050 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.237061 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.237077 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.237087 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:05Z","lastTransitionTime":"2025-10-11T03:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.339610 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.339668 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.339678 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.339690 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.339699 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:05Z","lastTransitionTime":"2025-10-11T03:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.422804 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 03:56:05 crc kubenswrapper[4798]: E1011 03:56:05.423183 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
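The entries just below show the kubelet refusing to restart ovnkube-controller for 20 seconds ("back-off 20s restarting failed container=..."). CrashLoopBackOff delays grow exponentially between restarts; a minimal sketch of the schedule, assuming the commonly cited kubelet policy of a 10s initial delay doubling up to a 5m cap (under which "back-off 20s" would be the second restart):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed policy: 10s initial back-off, doubled per crash, capped at 5m.
	backoff := 10 * time.Second
	const maxDelay = 5 * time.Minute
	for restart := 1; restart <= 7; restart++ {
		fmt.Printf("restart %d: wait %v\n", restart, backoff)
		backoff *= 2
		if backoff > maxDelay {
			backoff = maxDelay
		}
	}
}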
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.424111 4798 scope.go:117] "RemoveContainer" containerID="a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf" Oct 11 03:56:05 crc kubenswrapper[4798]: E1011 03:56:05.424240 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.425569 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:05 crc kubenswrapper[4798]: E1011 03:56:05.425692 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.425774 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:05 crc kubenswrapper[4798]: E1011 03:56:05.425841 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.425904 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:05 crc kubenswrapper[4798]: E1011 03:56:05.425952 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.439137 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.441754 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.441782 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.441791 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.441804 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.441813 4798 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:05Z","lastTransitionTime":"2025-10-11T03:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.486718 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.515186 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/b
in\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverri
de-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.531757 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/
etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:52Z\\\",\\\"message\\\":\\\"UUIDName:}]\\\\nI1011 03:55:52.357804 6446 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:73135118-cf1b-4568-bd31-2f50308bf69d}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 03:55:52.356105 6446 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357905 6446 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357951 6446 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1011 03:55:52.357979 6446 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1011 03:55:52.358011 6446 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.358029 6446 services_controller.go:356] Processing sync for service openshift-ingress-operator/metrics for network=default\\\\nF1011 03:55:52.357999 6446 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to 
start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acces
s-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.541604 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.544078 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.544130 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.544140 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.544154 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.544163 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:05Z","lastTransitionTime":"2025-10-11T03:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.553926 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.566932 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.577983 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.589144    4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z"
Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.599482    4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.613908 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.627251 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9be402e-40bb-4e2d-be8b-349646c34f1a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6897a04b7204031c861d74f6a4d475af9e9b88220cf02bfa50e7845c85cd5f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b88dcbb90798e97c13f7e7ef7bc7171bb1dc33cf488bd453a8d263a02093dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a938
0066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2fb58514dfb39402d4e10ca699975671a3c53a7fe54b374a987efa8ba401ccf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.640131 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.646267 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.646306 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.646318 4798 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.646335 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.646347 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:05Z","lastTransitionTime":"2025-10-11T03:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.651356 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.663009 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.675280 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.687095 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:05Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.751246 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.751291 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.751302 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.751317 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.751329 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:05Z","lastTransitionTime":"2025-10-11T03:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.853122 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.853161 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.853170 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.853185 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.853194 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:05Z","lastTransitionTime":"2025-10-11T03:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.954918 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.954970 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.954985 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.955006 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:05 crc kubenswrapper[4798]: I1011 03:56:05.955023 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:05Z","lastTransitionTime":"2025-10-11T03:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.057312 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.057351 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.057359 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.057372 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.057380 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:06Z","lastTransitionTime":"2025-10-11T03:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.159792 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.159826 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.159836 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.159852 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.159873 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:06Z","lastTransitionTime":"2025-10-11T03:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.262452 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.262485 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.262494 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.262506 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.262515 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:06Z","lastTransitionTime":"2025-10-11T03:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.364377 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.365182 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.365266 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.365335 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.365413 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:06Z","lastTransitionTime":"2025-10-11T03:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.469257 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.469615 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.469745 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.469893 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.469987 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:06Z","lastTransitionTime":"2025-10-11T03:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.572216 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.572252 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.572278 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.572296 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.572305 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:06Z","lastTransitionTime":"2025-10-11T03:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.675138 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.675498 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.675595 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.675676 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.675737 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:06Z","lastTransitionTime":"2025-10-11T03:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.780331 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.780991 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.781253 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.781376 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.781516 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:06Z","lastTransitionTime":"2025-10-11T03:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.883196 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.883241 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.883257 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.883279 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.883304 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:06Z","lastTransitionTime":"2025-10-11T03:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.985338 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.985640 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.985730 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.985799 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:06 crc kubenswrapper[4798]: I1011 03:56:06.985868 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:06Z","lastTransitionTime":"2025-10-11T03:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.089171 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.089577 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.089645 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.089879 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.089939 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:07Z","lastTransitionTime":"2025-10-11T03:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.193065 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.193119 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.193139 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.193163 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.193182 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:07Z","lastTransitionTime":"2025-10-11T03:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.295322 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.295356 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.295366 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.295381 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.295412 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:07Z","lastTransitionTime":"2025-10-11T03:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.399368 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.399428 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.399438 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.399461 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.399471 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:07Z","lastTransitionTime":"2025-10-11T03:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.423347 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.423347 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:07 crc kubenswrapper[4798]: E1011 03:56:07.423576 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.423347 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:07 crc kubenswrapper[4798]: E1011 03:56:07.423672 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:07 crc kubenswrapper[4798]: E1011 03:56:07.423743 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.423991 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:07 crc kubenswrapper[4798]: E1011 03:56:07.424180 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.501873 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.501922 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.501937 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.501955 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.501968 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:07Z","lastTransitionTime":"2025-10-11T03:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.604738 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.604776 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.604787 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.604802 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.604813 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:07Z","lastTransitionTime":"2025-10-11T03:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.706652 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.706938 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.707027 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.707112 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.707176 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:07Z","lastTransitionTime":"2025-10-11T03:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.810513 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.810874 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.810958 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.811025 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.811088 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:07Z","lastTransitionTime":"2025-10-11T03:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.913648 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.913887 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.913948 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.914083 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:07 crc kubenswrapper[4798]: I1011 03:56:07.914147 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:07Z","lastTransitionTime":"2025-10-11T03:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.017275 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.017663 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.018211 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.018445 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.018587 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:08Z","lastTransitionTime":"2025-10-11T03:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.122396 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.122906 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.123072 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.123230 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.123374 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:08Z","lastTransitionTime":"2025-10-11T03:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.227235 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.227271 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.227281 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.227294 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.227309 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:08Z","lastTransitionTime":"2025-10-11T03:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.329621 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.329889 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.330014 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.330117 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.330198 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:08Z","lastTransitionTime":"2025-10-11T03:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.432401 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.433251 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.433367 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.433475 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.433562 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:08Z","lastTransitionTime":"2025-10-11T03:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.536421 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.536744 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.536857 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.536970 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.537051 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:08Z","lastTransitionTime":"2025-10-11T03:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.639196 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.639647 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.639877 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.639987 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.640057 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:08Z","lastTransitionTime":"2025-10-11T03:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.751265 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.751303 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.751313 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.751326 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.751334 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:08Z","lastTransitionTime":"2025-10-11T03:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.854171 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.854221 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.854238 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.854261 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.854279 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:08Z","lastTransitionTime":"2025-10-11T03:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.957177 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.957206 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.957215 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.957226 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:08 crc kubenswrapper[4798]: I1011 03:56:08.957235 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:08Z","lastTransitionTime":"2025-10-11T03:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.059873 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.059907 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.059917 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.059930 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.059938 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:09Z","lastTransitionTime":"2025-10-11T03:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.162980 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.163248 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.163340 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.163449 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.163540 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:09Z","lastTransitionTime":"2025-10-11T03:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.265486 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.265527 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.265540 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.265557 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.265567 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:09Z","lastTransitionTime":"2025-10-11T03:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.368244 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.368333 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.368346 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.368369 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.368384 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:09Z","lastTransitionTime":"2025-10-11T03:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.423627 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.423745 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:09 crc kubenswrapper[4798]: E1011 03:56:09.423780 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.423908 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:09 crc kubenswrapper[4798]: E1011 03:56:09.424004 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:09 crc kubenswrapper[4798]: E1011 03:56:09.424168 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.423920 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:09 crc kubenswrapper[4798]: E1011 03:56:09.424282 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.470848 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.470916 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.470928 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.470943 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.470952 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:09Z","lastTransitionTime":"2025-10-11T03:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.573339 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.573376 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.573387 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.573418 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.573430 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:09Z","lastTransitionTime":"2025-10-11T03:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.675898 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.676140 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.676202 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.676268 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.676323 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:09Z","lastTransitionTime":"2025-10-11T03:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.779178 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.779223 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.779234 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.779250 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.779263 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:09Z","lastTransitionTime":"2025-10-11T03:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.881582 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.881652 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.881675 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.881703 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.881723 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:09Z","lastTransitionTime":"2025-10-11T03:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.984468 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.984501 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.984513 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.984529 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:09 crc kubenswrapper[4798]: I1011 03:56:09.984540 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:09Z","lastTransitionTime":"2025-10-11T03:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.086344 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.086377 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.086385 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.086413 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.086423 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:10Z","lastTransitionTime":"2025-10-11T03:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.188510 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.188552 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.188562 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.188580 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.188594 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:10Z","lastTransitionTime":"2025-10-11T03:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.291962 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.292020 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.292031 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.292051 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.292062 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:10Z","lastTransitionTime":"2025-10-11T03:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.395664 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.395702 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.395712 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.395728 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.395740 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:10Z","lastTransitionTime":"2025-10-11T03:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.500023 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.500142 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.500175 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.500219 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.500261 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:10Z","lastTransitionTime":"2025-10-11T03:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.603216 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.603260 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.603272 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.603291 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.603303 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:10Z","lastTransitionTime":"2025-10-11T03:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.705970 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.706011 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.706024 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.706041 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.706053 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:10Z","lastTransitionTime":"2025-10-11T03:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.808776 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.808819 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.808835 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.808856 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.808871 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:10Z","lastTransitionTime":"2025-10-11T03:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.911158 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.911215 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.911229 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.911248 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:10 crc kubenswrapper[4798]: I1011 03:56:10.911261 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:10Z","lastTransitionTime":"2025-10-11T03:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.014665 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.014744 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.014766 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.014801 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.014826 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:11Z","lastTransitionTime":"2025-10-11T03:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.117536 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.117598 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.117616 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.117641 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.117660 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:11Z","lastTransitionTime":"2025-10-11T03:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.220269 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.220318 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.220335 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.220357 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.220374 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:11Z","lastTransitionTime":"2025-10-11T03:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.322817 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.322855 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.322867 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.322884 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.322894 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:11Z","lastTransitionTime":"2025-10-11T03:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
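The NotReady condition being re-recorded above comes from the kubelet's network-readiness check: the container runtime reports NetworkReady=false because no CNI configuration file exists in /etc/kubernetes/cni/net.d/. As a minimal Go sketch (illustrative only, not the kubelet's actual code), a loader-style scan of that directory for the conventional CNI extensions shows what the check is waiting for; the directory path comes from the log message, everything else is an assumption.

// cnicheck.go: minimal sketch of a CNI config-directory scan. The extensions
// (.conf, .conflist, .json) are the conventional CNI ones; this mirrors the
// condition reported above but is not the kubelet's implementation.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func cniConfigFiles(dir string) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var found []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			found = append(found, filepath.Join(dir, e.Name()))
		}
	}
	return found, nil
}

func main() {
	files, err := cniConfigFiles("/etc/kubernetes/cni/net.d")
	if err != nil || len(files) == 0 {
		// The state the kubelet keeps reporting: no CNI config yet.
		fmt.Println("network not ready: no CNI configuration file found")
		return
	}
	fmt.Println("CNI configs:", files)
}

Once the network provider writes a config file into that directory, the readiness check flips and the repeated NodeNotReady events stop.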
Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.401673 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt"
Oct 11 03:56:11 crc kubenswrapper[4798]: E1011 03:56:11.401884 4798 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 11 03:56:11 crc kubenswrapper[4798]: E1011 03:56:11.401996 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs podName:3cf2b185-9a26-4448-8fc5-4885f98daf87 nodeName:}" failed. No retries permitted until 2025-10-11 03:56:43.401973726 +0000 UTC m=+98.738263402 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs") pod "network-metrics-daemon-5bfzt" (UID: "3cf2b185-9a26-4448-8fc5-4885f98daf87") : object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.422781 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt"
Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.422836 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.422877 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 03:56:11 crc kubenswrapper[4798]: I1011 03:56:11.422892 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 03:56:11 crc kubenswrapper[4798]: E1011 03:56:11.423003 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 03:56:11 crc kubenswrapper[4798]: E1011 03:56:11.423117 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 03:56:11 crc kubenswrapper[4798]: E1011 03:56:11.423248 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87"
Oct 11 03:56:11 crc kubenswrapper[4798]: E1011 03:56:11.423500 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
[The five-record node-status group repeats verbatim again, timestamps advancing from 03:56:11.425 through 03:56:12.461.]
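The nestedpendingoperations record above schedules the next mount attempt 32 s out (durationBeforeRetry 32s, until m=+98.74). That figure is consistent with an exponential backoff that doubles after each failed attempt. The Go sketch below is a generic illustration under that assumption; the 500 ms initial delay and 2-minute cap are assumed for illustration and are not values confirmed by this log, which only shows the resulting 32 s delay.

// backoff.go: generic exponential-backoff sketch. Only the 32 s result is
// grounded in the log; the initial delay, factor, and cap are assumptions.
package main

import (
	"fmt"
	"time"
)

type backoff struct {
	delay time.Duration // next wait; doubles on every failure
	max   time.Duration // upper bound on the wait
}

func (b *backoff) next() time.Duration {
	d := b.delay
	b.delay *= 2
	if b.delay > b.max {
		b.delay = b.max
	}
	return d
}

func main() {
	b := &backoff{delay: 500 * time.Millisecond, max: 2 * time.Minute}
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: retry in %s\n", attempt, b.next())
	}
	// Under these assumptions, attempt 7 prints "retry in 32s", matching
	// the durationBeforeRetry seen in the record above.
}

The underlying failure ("metrics-daemon-secret" not registered) is independent of the backoff: the mount cannot succeed until the secret becomes available to the kubelet, so the retries simply space out the attempts.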
Oct 11 03:56:12 crc kubenswrapper[4798]: E1011 03:56:12.482200 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:12Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.487274 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.487340 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.487357 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.487407 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.487426 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:12Z","lastTransitionTime":"2025-10-11T03:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:12 crc kubenswrapper[4798]: E1011 03:56:12.501210 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:12Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.506333 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.506370 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
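Every status-patch attempt dies the same way: the node-identity webhook at 127.0.0.1:9743 presents a serving certificate whose NotAfter (2025-08-24T17:21:41Z) is earlier than the current time (2025-10-11T03:56:12Z), so TLS verification fails with "certificate has expired or is not yet valid". The Go sketch below reproduces just that x509 validity-window check; the certificate path is a placeholder assumption, and only the expiry logic mirrors the logged error.

// certcheck.go: sketch of the x509 validity-window check failing above.
// The PEM path is a placeholder assumption for illustration.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	pemBytes, err := os.ReadFile("/path/to/webhook-serving-cert.pem") // placeholder path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now().UTC()
	switch {
	case now.After(cert.NotAfter):
		// The case hit in the log: current time is after NotAfter.
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	default:
		fmt.Println("certificate is within its validity window")
	}
}

Until that certificate is rotated, the kubelet cannot patch node status, so the retries below continue with the identical payload.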
event="NodeHasNoDiskPressure" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.506380 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.506410 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.506421 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:12Z","lastTransitionTime":"2025-10-11T03:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:12 crc kubenswrapper[4798]: E1011 03:56:12.523891 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:12Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.528315 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.528348 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
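Every "Error updating node status, will retry" entry above fails on the same check: the webhook's serving certificate has a NotAfter of 2025-08-24T17:21:41Z, which is before the current time 2025-10-11T03:56:12Z, so Go's TLS verification rejects the handshake. A minimal sketch of that validity-window check, assuming a hypothetical certificate path (the log does not say where the webhook cert is stored):

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path; substitute the actual webhook serving cert.
	data, err := os.ReadFile("/path/to/webhook-serving-cert.pem")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	now := time.Now().UTC()
	// The same window check that yields the log's
	// "certificate has expired or is not yet valid" error.
	switch {
	case now.Before(cert.NotBefore):
		fmt.Printf("not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
	case now.After(cert.NotAfter):
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	default:
		fmt.Printf("valid until %s\n", cert.NotAfter.Format(time.RFC3339))
	}
}
```

Until that certificate is rotated, every Post to https://127.0.0.1:9743 will keep failing the same way, which is why the identical x509 error recurs on each retry below.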
event="NodeHasNoDiskPressure" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.528365 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.528385 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.528414 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:12Z","lastTransitionTime":"2025-10-11T03:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:12 crc kubenswrapper[4798]: E1011 03:56:12.542180 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:12Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.546652 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.546699 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
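The recurring NodeNotReady condition is independent of the webhook failure: the container runtime reports NetworkReady=false until a CNI configuration file appears in /etc/kubernetes/cni/net.d/. A minimal sketch of that directory probe, assuming the file extensions and the networkReady helper (the real check lives in the container runtime's CNI config loader, not in this form):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// networkReady reports whether confDir contains at least one CNI
// configuration file, the condition the log says is unmet
// ("no CNI configuration file in /etc/kubernetes/cni/net.d/").
func networkReady(confDir string) (bool, error) {
	entries, err := os.ReadDir(confDir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions commonly accepted by CNI loaders
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ready, err := networkReady("/etc/kubernetes/cni/net.d")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("NetworkReady =", ready)
}
```

Once the network provider (here, multus/OVN on the crc node) writes its config into that directory, the Ready condition flips without any kubelet restart.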
event="NodeHasNoDiskPressure" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.546711 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.546730 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.546742 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:12Z","lastTransitionTime":"2025-10-11T03:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:12 crc kubenswrapper[4798]: E1011 03:56:12.559538 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:12Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:12 crc kubenswrapper[4798]: E1011 03:56:12.559704 4798 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.561809 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
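The "Unable to update node status" entry above is the kubelet giving up after a fixed number of patch attempts; in the upstream kubelet this bound is the nodeStatusUpdateRetry constant. A hedged sketch of that loop, assuming the constant's value of 5 and the tryUpdate callback shape (based on the upstream source, not on anything in this log):

```go
package main

import (
	"errors"
	"fmt"
)

// nodeStatusUpdateRetry mirrors the upstream kubelet constant bounding
// how many node-status patch attempts are made before giving up.
const nodeStatusUpdateRetry = 5

// updateNodeStatus retries tryUpdate up to nodeStatusUpdateRetry times
// and, when every attempt fails, returns the same "exceeds retry count"
// error recorded in the log above.
func updateNodeStatus(tryUpdate func(attempt int) error) error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdate(i); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	err := updateNodeStatus(func(attempt int) error {
		// Stand-in for the failing PATCH: in the log, the webhook's
		// expired TLS certificate rejects every attempt identically.
		return fmt.Errorf("attempt %d: x509: certificate has expired", attempt)
	})
	fmt.Println(err)
}
```

Because the failure is deterministic (an expired certificate), all attempts within one cycle fail identically, and the kubelet simply starts a fresh cycle on its next status-update interval, producing the repeating pattern that follows.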
event="NodeHasSufficientMemory" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.561854 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.561868 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.561890 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.561903 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:12Z","lastTransitionTime":"2025-10-11T03:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.665760 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.665811 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.665824 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.665847 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.665861 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:12Z","lastTransitionTime":"2025-10-11T03:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.769503 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.769570 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.769587 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.769613 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.769631 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:12Z","lastTransitionTime":"2025-10-11T03:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.872844 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.872901 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.872915 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.872938 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.872953 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:12Z","lastTransitionTime":"2025-10-11T03:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.976919 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.977157 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.977168 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.977191 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:12 crc kubenswrapper[4798]: I1011 03:56:12.977205 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:12Z","lastTransitionTime":"2025-10-11T03:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.080085 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.080165 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.080184 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.080208 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.080226 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:13Z","lastTransitionTime":"2025-10-11T03:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.184607 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.184704 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.184736 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.184783 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.184810 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:13Z","lastTransitionTime":"2025-10-11T03:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.320701 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.320760 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.320773 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.320795 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.320811 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:13Z","lastTransitionTime":"2025-10-11T03:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.423019 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.423160 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.423230 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:13 crc kubenswrapper[4798]: E1011 03:56:13.423250 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:13 crc kubenswrapper[4798]: E1011 03:56:13.423422 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:13 crc kubenswrapper[4798]: E1011 03:56:13.423561 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.423782 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:13 crc kubenswrapper[4798]: E1011 03:56:13.423867 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.426163 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.426225 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.427656 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.427697 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.427722 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:13Z","lastTransitionTime":"2025-10-11T03:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.530328 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.530369 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.530378 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.530411 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.530422 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:13Z","lastTransitionTime":"2025-10-11T03:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.632933 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.632973 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.632982 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.632994 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.633003 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:13Z","lastTransitionTime":"2025-10-11T03:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.736101 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.736194 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.736220 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.736249 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.736271 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:13Z","lastTransitionTime":"2025-10-11T03:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.839356 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.839417 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.839429 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.839448 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.839459 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:13Z","lastTransitionTime":"2025-10-11T03:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.866195 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-p6xdd_bd9c0e44-3329-422a-907b-e9e9bb6194cc/kube-multus/0.log" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.866425 4798 generic.go:334] "Generic (PLEG): container finished" podID="bd9c0e44-3329-422a-907b-e9e9bb6194cc" containerID="a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825" exitCode=1 Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.866491 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-p6xdd" event={"ID":"bd9c0e44-3329-422a-907b-e9e9bb6194cc","Type":"ContainerDied","Data":"a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825"} Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.867263 4798 scope.go:117] "RemoveContainer" containerID="a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.890664 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:13Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.903523 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:13Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.919504 4798 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:13Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.943470 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:52Z\\\",\\\"message\\\":\\\"UUIDName:}]\\\\nI1011 03:55:52.357804 6446 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:73135118-cf1b-4568-bd31-2f50308bf69d}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 03:55:52.356105 6446 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357905 6446 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357951 6446 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1011 03:55:52.357979 6446 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1011 03:55:52.358011 6446 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.358029 6446 services_controller.go:356] Processing sync for service openshift-ingress-operator/metrics for network=default\\\\nF1011 03:55:52.357999 6446 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:13Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.943606 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.944079 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.944089 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.944103 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.944113 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:13Z","lastTransitionTime":"2025-10-11T03:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.955951 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:13Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.970961 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:13Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:13 crc kubenswrapper[4798]: I1011 03:56:13.985757 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:13Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:13.999290 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:13Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.012072 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.024761 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.041083 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.047475 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.047523 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.047537 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.047559 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.047574 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:14Z","lastTransitionTime":"2025-10-11T03:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.055028 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9be402e-40bb-4e2d-be8b-349646c34f1a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6897a04b7204031c861d74f6a4d475af9e9b88220cf02bfa50e7845c85cd5f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b88dcbb90798e97c13f7e7ef7bc7171bb1dc33cf488bd453a8d263a02093dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2fb58514dfb39402d4e10ca699975671a3c53a7fe54b374a987efa8ba401ccf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.068204 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/o
cp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.080041 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.094732 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.108005 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:56:13Z\\\",\\\"message\\\":\\\"2025-10-11T03:55:27+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec\\\\n2025-10-11T03:55:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec to /host/opt/cni/bin/\\\\n2025-10-11T03:55:27Z [verbose] multus-daemon started\\\\n2025-10-11T03:55:27Z [verbose] Readiness Indicator file check\\\\n2025-10-11T03:56:12Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.120684 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.150328 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.150376 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.150466 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.150492 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.150512 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:14Z","lastTransitionTime":"2025-10-11T03:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.254341 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.254427 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.254446 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.254471 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.254487 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:14Z","lastTransitionTime":"2025-10-11T03:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.357581 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.357637 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.357657 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.357682 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.357701 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:14Z","lastTransitionTime":"2025-10-11T03:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.460174 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.460231 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.460249 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.460272 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.460289 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:14Z","lastTransitionTime":"2025-10-11T03:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.563459 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.563504 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.563515 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.563534 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.563547 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:14Z","lastTransitionTime":"2025-10-11T03:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.666661 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.667454 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.667472 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.667490 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.667505 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:14Z","lastTransitionTime":"2025-10-11T03:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.770182 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.770234 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.770249 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.770270 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.770285 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:14Z","lastTransitionTime":"2025-10-11T03:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.871858 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.871916 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.871931 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.871960 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.871975 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:14Z","lastTransitionTime":"2025-10-11T03:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.872244 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-p6xdd_bd9c0e44-3329-422a-907b-e9e9bb6194cc/kube-multus/0.log" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.872311 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-p6xdd" event={"ID":"bd9c0e44-3329-422a-907b-e9e9bb6194cc","Type":"ContainerStarted","Data":"b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a"} Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.887976 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.901721 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.913452 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.926540 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.944825 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/o
penshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.965008 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.975698 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.975761 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.975777 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.975804 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.975819 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:14Z","lastTransitionTime":"2025-10-11T03:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.978273 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:14 crc kubenswrapper[4798]: I1011 03:56:14.992371 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9be402e-40bb-4e2d-be8b-349646c34f1a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6897a04b7204031c861d74f6a4d475af9e9b88220cf02bfa50e7845c85cd5f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b88dcbb90798e97c13f7e7ef7bc7171bb1dc33cf488bd453a8d263a02093dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2fb58514dfb39402d4e10ca699975671a3c53a7fe54b374a987efa8ba401ccf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:14Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.006274 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.022565 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.036758 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:56:13Z\\\",\\\"message\\\":\\\"2025-10-11T03:55:27+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec\\\\n2025-10-11T03:55:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec to /host/opt/cni/bin/\\\\n2025-10-11T03:55:27Z [verbose] multus-daemon started\\\\n2025-10-11T03:55:27Z [verbose] Readiness Indicator file check\\\\n2025-10-11T03:56:12Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:56:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.051219 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 
03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.070120 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.078871 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 
03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.078928 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.078938 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.078958 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.078969 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:15Z","lastTransitionTime":"2025-10-11T03:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.097587 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67
776e6b11ebdc9a41d8b20baf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:52Z\\\",\\\"message\\\":\\\"UUIDName:}]\\\\nI1011 03:55:52.357804 6446 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:73135118-cf1b-4568-bd31-2f50308bf69d}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 03:55:52.356105 6446 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357905 6446 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357951 6446 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1011 03:55:52.357979 6446 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1011 03:55:52.358011 6446 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.358029 6446 services_controller.go:356] Processing sync for service openshift-ingress-operator/metrics for network=default\\\\nF1011 03:55:52.357999 6446 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.113156 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.131125 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.145761 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.182151 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.182544 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.182633 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.182710 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.182777 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:15Z","lastTransitionTime":"2025-10-11T03:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.286487 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.286993 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.287241 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.287495 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.287769 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:15Z","lastTransitionTime":"2025-10-11T03:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.392187 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.392247 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.392265 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.392292 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.392311 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:15Z","lastTransitionTime":"2025-10-11T03:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.423225 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.423282 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.423275 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.423376 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:15 crc kubenswrapper[4798]: E1011 03:56:15.423560 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:15 crc kubenswrapper[4798]: E1011 03:56:15.423715 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:15 crc kubenswrapper[4798]: E1011 03:56:15.423982 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:15 crc kubenswrapper[4798]: E1011 03:56:15.424246 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.443234 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9be402e-40bb-4e2d-be8b-349646c34f1a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6897a04b7204031c861d74f6a4d475af9e9b88220cf02bfa50e7845c85cd5f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b88dcbb90798e97c13f7e7ef7bc7171bb1dc33cf488bd453a8d263a02093dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2fb58514dfb39402d4e10ca699975671a3c53a7fe54b374a987efa8ba401ccf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.463268 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.480805 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.496440 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.496488 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.496498 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.496524 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.496536 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:15Z","lastTransitionTime":"2025-10-11T03:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.496943 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.514726 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:56:13Z\\\",\\\"message\\\":\\\"2025-10-11T03:55:27+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec\\\\n2025-10-11T03:55:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec to /host/opt/cni/bin/\\\\n2025-10-11T03:55:27Z [verbose] multus-daemon started\\\\n2025-10-11T03:55:27Z [verbose] Readiness Indicator file check\\\\n2025-10-11T03:56:12Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:56:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.529636 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 
03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.545074 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.557493 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.575172 4798 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.598919 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:52Z\\\",\\\"message\\\":\\\"UUIDName:}]\\\\nI1011 03:55:52.357804 6446 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:73135118-cf1b-4568-bd31-2f50308bf69d}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 03:55:52.356105 6446 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357905 6446 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357951 6446 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1011 03:55:52.357979 6446 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1011 03:55:52.358011 6446 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.358029 6446 services_controller.go:356] Processing sync for service openshift-ingress-operator/metrics for network=default\\\\nF1011 03:55:52.357999 6446 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.602348 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.602434 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.602452 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.602478 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.602509 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:15Z","lastTransitionTime":"2025-10-11T03:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.612677 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.633747 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.654524 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.674275 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.688050 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.698541 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.705360 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.705435 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.705454 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.705484 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.705502 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:15Z","lastTransitionTime":"2025-10-11T03:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.711104 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:15Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.808255 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.808327 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.808345 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.808376 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.808422 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:15Z","lastTransitionTime":"2025-10-11T03:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.912231 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.912421 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.912435 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.912469 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:15 crc kubenswrapper[4798]: I1011 03:56:15.912479 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:15Z","lastTransitionTime":"2025-10-11T03:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.015531 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.015660 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.015691 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.015724 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.015745 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:16Z","lastTransitionTime":"2025-10-11T03:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.119435 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.119945 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.120081 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.120292 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.120497 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:16Z","lastTransitionTime":"2025-10-11T03:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.223308 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.223775 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.224046 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.224286 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.224487 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:16Z","lastTransitionTime":"2025-10-11T03:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.327173 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.327218 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.327230 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.327249 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.327268 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:16Z","lastTransitionTime":"2025-10-11T03:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.430655 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.430698 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.430708 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.430726 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.430739 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:16Z","lastTransitionTime":"2025-10-11T03:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.536938 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.536985 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.536995 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.537030 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.537041 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:16Z","lastTransitionTime":"2025-10-11T03:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.641295 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.641345 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.641356 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.641373 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.641385 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:16Z","lastTransitionTime":"2025-10-11T03:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.744613 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.744654 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.744665 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.744680 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.744689 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:16Z","lastTransitionTime":"2025-10-11T03:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.847783 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.847873 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.847902 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.847937 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.847957 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:16Z","lastTransitionTime":"2025-10-11T03:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.951510 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.951591 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.951610 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.951637 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:16 crc kubenswrapper[4798]: I1011 03:56:16.951661 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:16Z","lastTransitionTime":"2025-10-11T03:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.056499 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.056575 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.056594 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.056623 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.056642 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:17Z","lastTransitionTime":"2025-10-11T03:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.159525 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.159588 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.159601 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.159622 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.159666 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:17Z","lastTransitionTime":"2025-10-11T03:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.263441 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.263518 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.263538 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.264121 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.264194 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:17Z","lastTransitionTime":"2025-10-11T03:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.367153 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.367214 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.367232 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.367260 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.367276 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:17Z","lastTransitionTime":"2025-10-11T03:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.423427 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:17 crc kubenswrapper[4798]: E1011 03:56:17.423707 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.424123 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.424199 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.424267 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 03:56:17 crc kubenswrapper[4798]: E1011 03:56:17.424286 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 03:56:17 crc kubenswrapper[4798]: E1011 03:56:17.424469 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87"
Oct 11 03:56:17 crc kubenswrapper[4798]: E1011 03:56:17.424642 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.470566 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.470632 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.470646 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.470668 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.470681 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:17Z","lastTransitionTime":"2025-10-11T03:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.573919 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.573998 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.574021 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.574058 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.574080 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:17Z","lastTransitionTime":"2025-10-11T03:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.677715 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.677773 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.677792 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.677821 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.677843 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:17Z","lastTransitionTime":"2025-10-11T03:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.781764 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.781854 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.781889 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.781925 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.781951 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:17Z","lastTransitionTime":"2025-10-11T03:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.887094 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.887162 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.887181 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.887207 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.887225 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:17Z","lastTransitionTime":"2025-10-11T03:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.990309 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.990370 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.990380 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.990418 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:17 crc kubenswrapper[4798]: I1011 03:56:17.990432 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:17Z","lastTransitionTime":"2025-10-11T03:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.093955 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.094017 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.094036 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.094061 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.094077 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:18Z","lastTransitionTime":"2025-10-11T03:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.197638 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.197711 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.197729 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.197754 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.197769 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:18Z","lastTransitionTime":"2025-10-11T03:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.302500 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.302584 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.302610 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.302640 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.302660 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:18Z","lastTransitionTime":"2025-10-11T03:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.406855 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.406928 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.406955 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.406990 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.407021 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:18Z","lastTransitionTime":"2025-10-11T03:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.510137 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.510196 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.510214 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.510239 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.510259 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:18Z","lastTransitionTime":"2025-10-11T03:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.614508 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.614589 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.614650 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.614689 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.614710 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:18Z","lastTransitionTime":"2025-10-11T03:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.719277 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.719370 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.719388 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.719449 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.719472 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:18Z","lastTransitionTime":"2025-10-11T03:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.822202 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.822264 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.822278 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.822300 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.822317 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:18Z","lastTransitionTime":"2025-10-11T03:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.926003 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.926055 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.926073 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.926101 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:18 crc kubenswrapper[4798]: I1011 03:56:18.926121 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:18Z","lastTransitionTime":"2025-10-11T03:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.030174 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.030237 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.030255 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.030285 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.030310 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:19Z","lastTransitionTime":"2025-10-11T03:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.134381 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.134488 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.134507 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.134535 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.134556 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:19Z","lastTransitionTime":"2025-10-11T03:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.237930 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.238009 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.238027 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.238054 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.238075 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:19Z","lastTransitionTime":"2025-10-11T03:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.340904 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.340987 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.341007 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.341037 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.341058 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:19Z","lastTransitionTime":"2025-10-11T03:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.423624 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.423735 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.423735 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt"
Oct 11 03:56:19 crc kubenswrapper[4798]: E1011 03:56:19.423851 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.424006 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 03:56:19 crc kubenswrapper[4798]: E1011 03:56:19.424023 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87"
Oct 11 03:56:19 crc kubenswrapper[4798]: E1011 03:56:19.424081 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 03:56:19 crc kubenswrapper[4798]: E1011 03:56:19.424132 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.443281 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.443335 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.443353 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.443377 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.443431 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:19Z","lastTransitionTime":"2025-10-11T03:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.547063 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.547127 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.547141 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.547162 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.547173 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:19Z","lastTransitionTime":"2025-10-11T03:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.650268 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.650341 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.650352 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.650371 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.650383 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:19Z","lastTransitionTime":"2025-10-11T03:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.753010 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.753042 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.753050 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.753062 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.753070 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:19Z","lastTransitionTime":"2025-10-11T03:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.856175 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.856219 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.856232 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.856249 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.856261 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:19Z","lastTransitionTime":"2025-10-11T03:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.959055 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.959093 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.959105 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.959121 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:19 crc kubenswrapper[4798]: I1011 03:56:19.959133 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:19Z","lastTransitionTime":"2025-10-11T03:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.062467 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.062580 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.062594 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.062616 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.062629 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:20Z","lastTransitionTime":"2025-10-11T03:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.165826 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.165874 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.165884 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.165900 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.165912 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:20Z","lastTransitionTime":"2025-10-11T03:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.269523 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.269672 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.269696 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.269729 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.269751 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:20Z","lastTransitionTime":"2025-10-11T03:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.373312 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.373653 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.373712 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.373741 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.374067 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:20Z","lastTransitionTime":"2025-10-11T03:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.425011 4798 scope.go:117] "RemoveContainer" containerID="a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.477097 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.477194 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.477207 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.477226 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.477240 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:20Z","lastTransitionTime":"2025-10-11T03:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.580509 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.580633 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.580650 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.580667 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.580972 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:20Z","lastTransitionTime":"2025-10-11T03:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.683167 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.683210 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.683221 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.683238 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.683250 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:20Z","lastTransitionTime":"2025-10-11T03:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.786625 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.786660 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.786674 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.786692 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.786704 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:20Z","lastTransitionTime":"2025-10-11T03:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.889595 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.889638 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.889653 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.889675 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.889691 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:20Z","lastTransitionTime":"2025-10-11T03:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.900793 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/2.log"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.904267 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerStarted","Data":"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"}
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.906050 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.977759 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:20Z is after 2025-08-24T17:21:41Z"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.992516 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.992572 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.992586 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.992613 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:20 crc kubenswrapper[4798]: I1011 03:56:20.992627 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:20Z","lastTransitionTime":"2025-10-11T03:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.002620 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:20Z is after 2025-08-24T17:21:41Z"
Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.026321 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z"
Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.048881 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d
d63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-rel
ease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:52Z\\\",\\\"message\\\":\\\"UUIDName:}]\\\\nI1011 03:55:52.357804 6446 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:73135118-cf1b-4568-bd31-2f50308bf69d}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 03:55:52.356105 6446 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357905 6446 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357951 6446 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1011 03:55:52.357979 6446 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1011 03:55:52.358011 6446 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.358029 6446 services_controller.go:356] Processing sync for service openshift-ingress-operator/metrics for network=default\\\\nF1011 03:55:52.357999 6446 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:56:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.061009 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.075024 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.091700 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.095985 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.096063 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.096083 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.096109 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.096125 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:21Z","lastTransitionTime":"2025-10-11T03:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.108617 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.124376 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.137119 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.152276 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.167774 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9be402e-40bb-4e2d-be8b-349646c34f1a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6897a04b7204031c861d74f6a4d475af9e9b88220cf02bfa50e7845c85cd5f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b88dcbb90798e97c13f7e7ef7bc7171bb1dc33cf488bd453a8d263a02093dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\
\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2fb58514dfb39402d4e10ca699975671a3c53a7fe54b374a987efa8ba401ccf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.184165 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.196161 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.199463 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.199500 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.199516 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.199536 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.199548 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:21Z","lastTransitionTime":"2025-10-11T03:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.214066 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.228387 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:56:13Z\\\",\\\"message\\\":\\\"2025-10-11T03:55:27+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec\\\\n2025-10-11T03:55:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec to /host/opt/cni/bin/\\\\n2025-10-11T03:55:27Z [verbose] multus-daemon started\\\\n2025-10-11T03:55:27Z [verbose] Readiness Indicator file check\\\\n2025-10-11T03:56:12Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:56:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.242000 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 
03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.302535 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.302596 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.302614 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.302638 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.302651 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:21Z","lastTransitionTime":"2025-10-11T03:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.405296 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.405370 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.405388 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.405448 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.405469 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:21Z","lastTransitionTime":"2025-10-11T03:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.423399 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.423412 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.423428 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.423403 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:21 crc kubenswrapper[4798]: E1011 03:56:21.423521 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:21 crc kubenswrapper[4798]: E1011 03:56:21.423726 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:21 crc kubenswrapper[4798]: E1011 03:56:21.423782 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:21 crc kubenswrapper[4798]: E1011 03:56:21.423826 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.507826 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.507904 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.507927 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.507952 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.507970 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:21Z","lastTransitionTime":"2025-10-11T03:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.614503 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.614561 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.614575 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.614597 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.614614 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:21Z","lastTransitionTime":"2025-10-11T03:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.717320 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.717366 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.717378 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.717410 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.717424 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:21Z","lastTransitionTime":"2025-10-11T03:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.820424 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.820496 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.820510 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.820529 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.820540 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:21Z","lastTransitionTime":"2025-10-11T03:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.909931 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/3.log" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.910578 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/2.log" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.914235 4798 generic.go:334] "Generic (PLEG): container finished" podID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerID="66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b" exitCode=1 Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.914279 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"} Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.914319 4798 scope.go:117] "RemoveContainer" containerID="a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.915681 4798 scope.go:117] "RemoveContainer" containerID="66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b" Oct 11 03:56:21 crc kubenswrapper[4798]: E1011 03:56:21.915998 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.922996 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.923022 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.923031 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.923045 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.923055 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:21Z","lastTransitionTime":"2025-10-11T03:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.936618 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.960544 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:56:13Z\\\",\\\"message\\\":\\\"2025-10-11T03:55:27+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec\\\\n2025-10-11T03:55:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec to /host/opt/cni/bin/\\\\n2025-10-11T03:55:27Z [verbose] multus-daemon started\\\\n2025-10-11T03:55:27Z [verbose] Readiness Indicator file check\\\\n2025-10-11T03:56:12Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:56:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.975594 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 
03:56:21 crc kubenswrapper[4798]: I1011 03:56:21.993901 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:21Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.012347 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.025831 4798 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.025869 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.025882 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.025900 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.025913 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:22Z","lastTransitionTime":"2025-10-11T03:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.036496 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.061076 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e2aa76f109bbdb0ec131c0724c460f5c1313b0
96e2fb3ebf1e42fd4832c88b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9f55c50b0f40e90d5078225583f31860d2cfb67776e6b11ebdc9a41d8b20baf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:55:52Z\\\",\\\"message\\\":\\\"UUIDName:}]\\\\nI1011 03:55:52.357804 6446 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:73135118-cf1b-4568-bd31-2f50308bf69d}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 03:55:52.356105 6446 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357905 6446 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.357951 6446 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1011 03:55:52.357979 6446 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1011 03:55:52.358011 6446 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1011 03:55:52.358029 6446 services_controller.go:356] Processing sync for service openshift-ingress-operator/metrics for network=default\\\\nF1011 03:55:52.357999 6446 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:56:21Z\\\",\\\"message\\\":\\\"c8-46726fd37529 0xc007364d2d 0xc007364d2e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:grpc,Protocol:TCP,Port:50051,TargetPort:{0 50051 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{olm.catalogSource: community-operators,olm.managed: true,},ClusterIP:10.217.5.189,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.5.189],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1011 03:56:21.403655 6812 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added 
to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurre\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:56:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\
\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.077756 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.097308 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.116818 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.128920 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.128958 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.128966 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.128980 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.128991 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:22Z","lastTransitionTime":"2025-10-11T03:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.136264 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.155310 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.168123 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.182677 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.198910 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9be402e-40bb-4e2d-be8b-349646c34f1a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6897a04b7204031c861d74f6a4d475af9e9b88220cf02bfa50e7845c85cd5f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b88dcbb90798e97c13f7e7ef7bc7171bb1dc33cf488bd453a8d263a02093dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\
\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2fb58514dfb39402d4e10ca699975671a3c53a7fe54b374a987efa8ba401ccf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.214319 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.228150 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.230842 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.230918 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.230946 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.230982 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.231006 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:22Z","lastTransitionTime":"2025-10-11T03:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.333928 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.334009 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.334030 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.334058 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.334078 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:22Z","lastTransitionTime":"2025-10-11T03:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.454471 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.454521 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.454540 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.454563 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.454581 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:22Z","lastTransitionTime":"2025-10-11T03:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.558014 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.558072 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.558090 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.558113 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.558133 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:22Z","lastTransitionTime":"2025-10-11T03:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.661729 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.661826 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.661852 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.661882 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.661904 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:22Z","lastTransitionTime":"2025-10-11T03:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.764750 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.764790 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.764802 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.764819 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.764831 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:22Z","lastTransitionTime":"2025-10-11T03:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.869675 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.869778 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.869807 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.869848 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.869881 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:22Z","lastTransitionTime":"2025-10-11T03:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.886817 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.886887 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.886908 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.886938 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.886959 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:22Z","lastTransitionTime":"2025-10-11T03:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:22 crc kubenswrapper[4798]: E1011 03:56:22.912646 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.918510 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.918561 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.918582 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.918987 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.919029 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:22Z","lastTransitionTime":"2025-10-11T03:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.921766 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/3.log" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.932866 4798 scope.go:117] "RemoveContainer" containerID="66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b" Oct 11 03:56:22 crc kubenswrapper[4798]: E1011 03:56:22.933851 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" Oct 11 03:56:22 crc kubenswrapper[4798]: E1011 03:56:22.951108 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.953149 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.958719 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.959238 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.959251 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.959270 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.959284 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:22Z","lastTransitionTime":"2025-10-11T03:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.977949 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:56:13Z\\\",\\\"message\\\":\\\"2025-10-11T03:55:27+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec\\\\n2025-10-11T03:55:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec to /host/opt/cni/bin/\\\\n2025-10-11T03:55:27Z [verbose] multus-daemon started\\\\n2025-10-11T03:55:27Z [verbose] Readiness Indicator file check\\\\n2025-10-11T03:56:12Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\n\",\"reason\":\"Error\",\"startedAt\":\"2025-10-11T03:55:26Z\"}},\"name\":\"kube-multus\",\"ready\":true,\"restartCount\":1,\"started\":true,\"state\":{\"running\":{\"startedAt\":\"2025-10-11T03:56:13Z\"}},\"volumeMounts\":[{\"mountPath\":\"/entrypoint\",\"name\":\"cni-binary-copy\"},{\"mountPath\":\"/host/etc/os-release\",\"name\":\"os-release\"},{\"mountPath\":\"/host/etc/cni/net.d\",\"name\":\"system-cni-dir\"},{\"mountPath\":\"/host/run/multus/cni/net.d\",\"name\":\"multus-cni-dir\"},{\"mountPath\":\"/host/opt/cni/bin\",\"name\":\"cnibin\"},{\"mountPath\":\"/host/run/multus\",\"name\":\"multus-socket-dir-parent\"},{\"mountPath\":\"/run/k8s.cni.cncf.io\",\"name\":\"host-run-k8s-cni-cncf-io\"},{\"mountPath\":\"/run/netns\",\"name\":\"host-run-netns\"},{\"mountPath\":\"/var/lib/cni/bin\",\"name\":\"host-var-lib-cni-bin\"},{\"mountPath\":\"/var/lib/cni/multus\",\"name\":\"host-var-lib-cni-multus\"},{\"mountPath\":\"/var/lib/kubelet\",\"name\":\"host-var-lib-kubelet\"},{\"mountPath\":\"/hostroot\",\"name\":\"hostroot\"},{\"mountPath\":\"/etc/cni/multus/net.d\",\"name\":\"multus-conf-dir\"},{\"mountPath\":\"/etc/cni/net.d/multus.d\",\"name\":\"multus-daemon-config\",\"readOnly\":true,\"recursiveReadOnly\":\"Disabled\"},{\"mountPath\":\"/etc/cni/multus/certs\",\"name\":\"host-run-multus-certs\"},{\"mountPath\":\"/etc/kubernetes\",\"name\":\"etc-kubernetes\"},{\"mountPath\":\"/var/run/secrets/kubernetes.io/serviceaccount\",\"name\":\"kube-api-access-p9wst\",\"readOnly\":true,\"recursiveReadOnly\":\"Disabled\"}]}],\"hostIP\":\"192.168.126.11\",\"hostIPs\":[{\"ip\":\"192.168.126.11\"}],\"phase\":\"Running\",\"podIP\":\"192.168.126.11\",\"podIPs\":[{\"ip\":\"192.168.126.11\"}],\"startTime\":\"2025-10-11T03:55:25Z\"}}" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: E1011 03:56:22.982553 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [node status patch payload elided; identical to the 03:56:22.912646 entry above] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.988261 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.988318 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.988332 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.988355 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.988374 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:22Z","lastTransitionTime":"2025-10-11T03:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Oct 11 03:56:22 crc kubenswrapper[4798]: I1011 03:56:22.997895 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:22Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: E1011 03:56:23.008048 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.013961 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.014029 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.014048 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.014082 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.014100 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:23Z","lastTransitionTime":"2025-10-11T03:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.018927 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: E1011 03:56:23.037625 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: E1011 03:56:23.037899 4798 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.040432 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.040491 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.040519 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.040554 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.040584 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:23Z","lastTransitionTime":"2025-10-11T03:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.051338 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState
\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"n
ame\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt
/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.085359 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e2aa76f109bbdb0ec131c0724c460f5c1313b0
96e2fb3ebf1e42fd4832c88b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:56:21Z\\\",\\\"message\\\":\\\"c8-46726fd37529 0xc007364d2d 0xc007364d2e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:grpc,Protocol:TCP,Port:50051,TargetPort:{0 50051 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{olm.catalogSource: community-operators,olm.managed: true,},ClusterIP:10.217.5.189,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.5.189],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1011 03:56:21.403655 6812 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurre\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:56:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.104278 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.128006 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.143562 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.143602 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.143615 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.143630 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.143643 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:23Z","lastTransitionTime":"2025-10-11T03:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.151359 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.167909 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.183431 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.196619 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.213687 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.228979 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.241448 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/
ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.247117 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.247181 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.247202 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.247228 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.247249 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:23Z","lastTransitionTime":"2025-10-11T03:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.257296 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.276941 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9be402e-40bb-4e2d-be8b-349646c34f1a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6897a04b7204031c861d74f6a4d475af9e9b88220cf02bfa50e7845c85cd5f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b88dcbb90798e97c13f7e7ef7bc7171bb1dc33cf488bd453a8d263a02093dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2fb58514dfb39402d4e10ca699975671a3c53a7fe54b374a987efa8ba401ccf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:23Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.350193 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.350248 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.350261 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.350280 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.350292 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:23Z","lastTransitionTime":"2025-10-11T03:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.423374 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.423465 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:23 crc kubenswrapper[4798]: E1011 03:56:23.423547 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.423567 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.423652 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:23 crc kubenswrapper[4798]: E1011 03:56:23.423716 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:23 crc kubenswrapper[4798]: E1011 03:56:23.423837 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:23 crc kubenswrapper[4798]: E1011 03:56:23.424082 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.453050 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.453094 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.453107 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.453127 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.453143 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:23Z","lastTransitionTime":"2025-10-11T03:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.556126 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.556177 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.556190 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.556210 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.556224 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:23Z","lastTransitionTime":"2025-10-11T03:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.658796 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.658854 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.658901 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.658928 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.658953 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:23Z","lastTransitionTime":"2025-10-11T03:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.761833 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.761875 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.761886 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.761939 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.761956 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:23Z","lastTransitionTime":"2025-10-11T03:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.864668 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.864724 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.864736 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.864754 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.864767 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:23Z","lastTransitionTime":"2025-10-11T03:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.967979 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.968033 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.968044 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.968065 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:23 crc kubenswrapper[4798]: I1011 03:56:23.968083 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:23Z","lastTransitionTime":"2025-10-11T03:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.071360 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.071486 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.071511 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.071544 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.071567 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:24Z","lastTransitionTime":"2025-10-11T03:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.175422 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.175496 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.175516 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.175549 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.175582 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:24Z","lastTransitionTime":"2025-10-11T03:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.279097 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.279149 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.279160 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.279182 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.279194 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:24Z","lastTransitionTime":"2025-10-11T03:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.383773 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.383856 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.383877 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.383907 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.383928 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:24Z","lastTransitionTime":"2025-10-11T03:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.487376 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.487573 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.487600 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.487637 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.487661 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:24Z","lastTransitionTime":"2025-10-11T03:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.591285 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.591342 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.591359 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.591385 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.591446 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:24Z","lastTransitionTime":"2025-10-11T03:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.695075 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.695168 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.695188 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.695222 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.695259 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:24Z","lastTransitionTime":"2025-10-11T03:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.799163 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.799228 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.799249 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.799277 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.799296 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:24Z","lastTransitionTime":"2025-10-11T03:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.902720 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.902824 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.902848 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.902883 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:24 crc kubenswrapper[4798]: I1011 03:56:24.902911 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:24Z","lastTransitionTime":"2025-10-11T03:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.006548 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.006624 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.006647 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.006675 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.006694 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:25Z","lastTransitionTime":"2025-10-11T03:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.110820 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.110886 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.110906 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.110935 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.110955 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:25Z","lastTransitionTime":"2025-10-11T03:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.214420 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.215236 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.215281 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.215327 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.215354 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:25Z","lastTransitionTime":"2025-10-11T03:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.318471 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.319059 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.319263 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.319513 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.319722 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:25Z","lastTransitionTime":"2025-10-11T03:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.422377 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.422436 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.422445 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.422468 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.422485 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:25Z","lastTransitionTime":"2025-10-11T03:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.422569 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.422659 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 03:56:25 crc kubenswrapper[4798]: E1011 03:56:25.422716 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 03:56:25 crc kubenswrapper[4798]: E1011 03:56:25.422838 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.422867 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt"
Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.422953 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 03:56:25 crc kubenswrapper[4798]: E1011 03:56:25.422985 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:25 crc kubenswrapper[4798]: E1011 03:56:25.423133 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.437155 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.456302 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.470156 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.483731 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.497888 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.521864 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.531557 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.531637 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.531651 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.531690 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.531705 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:25Z","lastTransitionTime":"2025-10-11T03:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.546831 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9be402e-40bb-4e2d-be8b-349646c34f1a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6897a04b7204031c861d74f6a4d475af9e9b88220cf02bfa50e7845c85cd5f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b88dcbb90798e97c13f7e7ef7bc7171bb1dc33cf488bd453a8d263a02093dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2fb58514dfb39402d4e10ca699975671a3c53a7fe54b374a987efa8ba401ccf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.566931 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-releas
e-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.581206 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.602269 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.619052 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:56:13Z\\\",\\\"message\\\":\\\"2025-10-11T03:55:27+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec\\\\n2025-10-11T03:55:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec to /host/opt/cni/bin/\\\\n2025-10-11T03:55:27Z [verbose] multus-daemon started\\\\n2025-10-11T03:55:27Z [verbose] Readiness Indicator file check\\\\n2025-10-11T03:56:12Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:56:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.635638 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 
03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.637332 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.637413 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.637435 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.637464 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.637486 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:25Z","lastTransitionTime":"2025-10-11T03:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.649715 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.665499 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.683214 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.717627 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e2aa76f109bbdb0ec131c0724c460f5c1313b0
96e2fb3ebf1e42fd4832c88b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:56:21Z\\\",\\\"message\\\":\\\"c8-46726fd37529 0xc007364d2d 0xc007364d2e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:grpc,Protocol:TCP,Port:50051,TargetPort:{0 50051 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{olm.catalogSource: community-operators,olm.managed: true,},ClusterIP:10.217.5.189,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.5.189],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1011 03:56:21.403655 6812 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurre\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:56:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.730425 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:25Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.740764 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.740816 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.740832 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.740856 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.740872 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:25Z","lastTransitionTime":"2025-10-11T03:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.843693 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.843825 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.843884 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.843913 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.843933 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:25Z","lastTransitionTime":"2025-10-11T03:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.946475 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.946545 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.946563 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.946589 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:25 crc kubenswrapper[4798]: I1011 03:56:25.946607 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:25Z","lastTransitionTime":"2025-10-11T03:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.049668 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.049813 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.049845 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.049886 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.049917 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:26Z","lastTransitionTime":"2025-10-11T03:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.154004 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.154112 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.154135 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.154169 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.154196 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:26Z","lastTransitionTime":"2025-10-11T03:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.260630 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.260692 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.260709 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.260737 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.260755 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:26Z","lastTransitionTime":"2025-10-11T03:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.363415 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.363451 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.363459 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.363472 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.363480 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:26Z","lastTransitionTime":"2025-10-11T03:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.466425 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.466501 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.466521 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.466554 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.466577 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:26Z","lastTransitionTime":"2025-10-11T03:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.569651 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.569697 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.569707 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.569722 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.569733 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:26Z","lastTransitionTime":"2025-10-11T03:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.672644 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.672695 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.672707 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.672726 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.672738 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:26Z","lastTransitionTime":"2025-10-11T03:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.777024 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.777080 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.777099 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.777123 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.777138 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:26Z","lastTransitionTime":"2025-10-11T03:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.880523 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.880581 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.880592 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.880607 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.880618 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:26Z","lastTransitionTime":"2025-10-11T03:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.983204 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.983301 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.983357 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.983381 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:26 crc kubenswrapper[4798]: I1011 03:56:26.983415 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:26Z","lastTransitionTime":"2025-10-11T03:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.086671 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.086713 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.086724 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.086740 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.086750 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:27Z","lastTransitionTime":"2025-10-11T03:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.189911 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.189971 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.189985 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.190009 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.190028 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:27Z","lastTransitionTime":"2025-10-11T03:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.293712 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.293778 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.293793 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.293824 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.293841 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:27Z","lastTransitionTime":"2025-10-11T03:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.398303 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.398441 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.398469 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.398507 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.398535 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:27Z","lastTransitionTime":"2025-10-11T03:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.423204 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:27 crc kubenswrapper[4798]: E1011 03:56:27.423378 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.423808 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.423922 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:27 crc kubenswrapper[4798]: E1011 03:56:27.424092 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.424241 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:27 crc kubenswrapper[4798]: E1011 03:56:27.424594 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:27 crc kubenswrapper[4798]: E1011 03:56:27.424744 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.507116 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.507228 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.507259 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.507295 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.507324 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:27Z","lastTransitionTime":"2025-10-11T03:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.611715 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.611797 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.611855 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.611899 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.611937 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:27Z","lastTransitionTime":"2025-10-11T03:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.715420 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.715464 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.715476 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.715492 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.715503 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:27Z","lastTransitionTime":"2025-10-11T03:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.817995 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.818062 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.818081 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.818108 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.818185 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:27Z","lastTransitionTime":"2025-10-11T03:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.921638 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.921736 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.921762 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.921802 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:27 crc kubenswrapper[4798]: I1011 03:56:27.921828 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:27Z","lastTransitionTime":"2025-10-11T03:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.024902 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.024981 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.025000 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.025032 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.025054 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:28Z","lastTransitionTime":"2025-10-11T03:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.128628 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.128683 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.128697 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.128718 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.128728 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:28Z","lastTransitionTime":"2025-10-11T03:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.231986 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.232052 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.232075 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.232107 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.232127 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:28Z","lastTransitionTime":"2025-10-11T03:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.336250 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.336332 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.336357 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.336388 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.336456 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:28Z","lastTransitionTime":"2025-10-11T03:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.439888 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.439955 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.439981 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.440029 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.440053 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:28Z","lastTransitionTime":"2025-10-11T03:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.543703 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.543793 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.543817 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.543853 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.543879 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:28Z","lastTransitionTime":"2025-10-11T03:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.647241 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.647300 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.647314 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.647335 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.647347 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:28Z","lastTransitionTime":"2025-10-11T03:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.751226 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.751317 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.751339 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.751369 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.751423 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:28Z","lastTransitionTime":"2025-10-11T03:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.854510 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.854579 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.854609 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.854644 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.854672 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:28Z","lastTransitionTime":"2025-10-11T03:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.957271 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.957332 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.957346 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.957370 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:28 crc kubenswrapper[4798]: I1011 03:56:28.957385 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:28Z","lastTransitionTime":"2025-10-11T03:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.061808 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.061893 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.061963 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.061997 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.062023 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:29Z","lastTransitionTime":"2025-10-11T03:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.165306 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.165428 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.165451 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.165482 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.165503 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:29Z","lastTransitionTime":"2025-10-11T03:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.217613 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.217803 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.217869 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.217933 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.217950 4798 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.218275 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.218233285 +0000 UTC m=+148.554523011 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.218031 4798 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.218137 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.218563 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.218584 4798 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.218335 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.218672 4798 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.218691 4798 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.218528 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.21847638 +0000 UTC m=+148.554766216 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.218774 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.218757476 +0000 UTC m=+148.555047162 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.218795 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.218789127 +0000 UTC m=+148.555078813 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.268369 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.268470 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.268491 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.268519 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.268538 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:29Z","lastTransitionTime":"2025-10-11T03:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.319285 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.319578 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.319543757 +0000 UTC m=+148.655833493 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.371760 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.371832 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.371852 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.371883 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.371905 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:29Z","lastTransitionTime":"2025-10-11T03:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.422694 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.422654 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.423314 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.423144 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.423650 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.423880 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.424193 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:29 crc kubenswrapper[4798]: E1011 03:56:29.424681 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.474455 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.474518 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.474535 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.474556 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.474569 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:29Z","lastTransitionTime":"2025-10-11T03:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.577192 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.577287 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.577306 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.577337 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.577359 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:29Z","lastTransitionTime":"2025-10-11T03:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.681201 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.681311 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.681344 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.681385 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.681477 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:29Z","lastTransitionTime":"2025-10-11T03:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.785209 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.785294 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.785313 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.785339 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.785358 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:29Z","lastTransitionTime":"2025-10-11T03:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.888371 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.888479 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.888500 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.888528 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.888548 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:29Z","lastTransitionTime":"2025-10-11T03:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.992314 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.992509 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.992554 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.992603 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:29 crc kubenswrapper[4798]: I1011 03:56:29.992628 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:29Z","lastTransitionTime":"2025-10-11T03:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.095917 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.095980 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.095998 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.096024 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.096040 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:30Z","lastTransitionTime":"2025-10-11T03:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.199046 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.199107 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.199120 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.199145 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.199161 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:30Z","lastTransitionTime":"2025-10-11T03:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.303675 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.303789 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.303812 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.303844 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.303869 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:30Z","lastTransitionTime":"2025-10-11T03:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.408222 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.408323 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.408350 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.408387 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.408449 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:30Z","lastTransitionTime":"2025-10-11T03:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.511350 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.511433 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.511451 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.511469 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.511482 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:30Z","lastTransitionTime":"2025-10-11T03:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.614025 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.614224 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.614261 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.614302 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.614333 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:30Z","lastTransitionTime":"2025-10-11T03:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.717627 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.717700 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.717720 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.717756 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.717779 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:30Z","lastTransitionTime":"2025-10-11T03:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.822092 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.822167 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.822187 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.822213 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.822233 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:30Z","lastTransitionTime":"2025-10-11T03:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.926805 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.926861 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.926873 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.926892 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:30 crc kubenswrapper[4798]: I1011 03:56:30.926909 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:30Z","lastTransitionTime":"2025-10-11T03:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.030691 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.030787 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.030816 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.030851 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.030880 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:31Z","lastTransitionTime":"2025-10-11T03:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.134212 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.134293 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.134313 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.134343 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.134360 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:31Z","lastTransitionTime":"2025-10-11T03:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.238492 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.238554 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.238570 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.238596 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.238616 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:31Z","lastTransitionTime":"2025-10-11T03:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.342377 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.342504 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.342534 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.342571 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.342596 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:31Z","lastTransitionTime":"2025-10-11T03:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.423457 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.423541 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.423608 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.423567 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:31 crc kubenswrapper[4798]: E1011 03:56:31.423732 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:31 crc kubenswrapper[4798]: E1011 03:56:31.423986 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:31 crc kubenswrapper[4798]: E1011 03:56:31.424302 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:31 crc kubenswrapper[4798]: E1011 03:56:31.424576 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.446244 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.446347 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.446377 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.446501 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.446531 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:31Z","lastTransitionTime":"2025-10-11T03:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.551124 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.551197 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.551215 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.551248 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.551268 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:31Z","lastTransitionTime":"2025-10-11T03:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.655007 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.655069 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.655087 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.655113 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.655133 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:31Z","lastTransitionTime":"2025-10-11T03:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.764163 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.764244 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.764313 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.764379 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.764448 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:31Z","lastTransitionTime":"2025-10-11T03:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.872554 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.872615 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.872631 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.872653 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.872670 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:31Z","lastTransitionTime":"2025-10-11T03:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.975594 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.975666 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.975684 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.975735 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:31 crc kubenswrapper[4798]: I1011 03:56:31.975754 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:31Z","lastTransitionTime":"2025-10-11T03:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.079000 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.079078 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.079097 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.079126 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.079148 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:32Z","lastTransitionTime":"2025-10-11T03:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.182592 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.182645 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.182655 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.182673 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.182683 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:32Z","lastTransitionTime":"2025-10-11T03:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.285910 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.285976 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.285994 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.286021 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.286041 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:32Z","lastTransitionTime":"2025-10-11T03:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.389783 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.389835 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.389847 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.389875 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.389900 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:32Z","lastTransitionTime":"2025-10-11T03:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.446814 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.498429 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.498511 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.498526 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.498557 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.498574 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:32Z","lastTransitionTime":"2025-10-11T03:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.602204 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.602270 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.602283 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.602303 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.602319 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:32Z","lastTransitionTime":"2025-10-11T03:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.705436 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.705495 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.705510 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.705534 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.705549 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:32Z","lastTransitionTime":"2025-10-11T03:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.808447 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.808485 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.808496 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.808513 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.808524 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:32Z","lastTransitionTime":"2025-10-11T03:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.911697 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.911796 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.911823 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.911864 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:32 crc kubenswrapper[4798]: I1011 03:56:32.911897 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:32Z","lastTransitionTime":"2025-10-11T03:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.016335 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.016469 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.016493 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.016526 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.016544 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:33Z","lastTransitionTime":"2025-10-11T03:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.119732 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.119822 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.119847 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.119874 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.119892 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:33Z","lastTransitionTime":"2025-10-11T03:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.126778 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.126837 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.126855 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.126877 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.126895 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:33Z","lastTransitionTime":"2025-10-11T03:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:33 crc kubenswrapper[4798]: E1011 03:56:33.146881 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:33Z is after 2025-08-24T17:21:41Z"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.152723 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.152811 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.152837 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.152869 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.152895 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:33Z","lastTransitionTime":"2025-10-11T03:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:33 crc kubenswrapper[4798]: E1011 03:56:33.174091 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:33Z is after 2025-08-24T17:21:41Z"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.180650 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.180737 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.180764 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.180800 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.180825 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:33Z","lastTransitionTime":"2025-10-11T03:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:33 crc kubenswrapper[4798]: E1011 03:56:33.201771 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:33Z is after 2025-08-24T17:21:41Z"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.208012 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.208062 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.208077 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.208099 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.208117 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:33Z","lastTransitionTime":"2025-10-11T03:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 03:56:33 crc kubenswrapper[4798]: E1011 03:56:33.226536 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.230581 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.230641 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.230658 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.230679 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.230694 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:33Z","lastTransitionTime":"2025-10-11T03:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:33 crc kubenswrapper[4798]: E1011 03:56:33.252749 4798 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"319d317c-5958-4e72-8ec1-75f04665505c\\\",\\\"systemUUID\\\":\\\"914a85fd-a642-4471-b13e-f33764f494b0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:33Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:33 crc kubenswrapper[4798]: E1011 03:56:33.253009 4798 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.255018 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.255078 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.255090 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.255107 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.255141 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:33Z","lastTransitionTime":"2025-10-11T03:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.358147 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.358208 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.358225 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.358248 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.358265 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:33Z","lastTransitionTime":"2025-10-11T03:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.423347 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.423565 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.423623 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:33 crc kubenswrapper[4798]: E1011 03:56:33.423789 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.423870 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:33 crc kubenswrapper[4798]: E1011 03:56:33.424163 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:33 crc kubenswrapper[4798]: E1011 03:56:33.424289 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:33 crc kubenswrapper[4798]: E1011 03:56:33.424419 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.438564 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.461484 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.461543 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.461559 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.461580 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.461595 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:33Z","lastTransitionTime":"2025-10-11T03:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.564601 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.564939 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.565055 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.565160 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:33 crc kubenswrapper[4798]: I1011 03:56:33.565258 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:33Z","lastTransitionTime":"2025-10-11T03:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... the same five-entry sequence ("Recording event message for node" with NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady, then the "Node became not ready" Ready=False/KubeletNotReady condition) recurs at roughly 100 ms intervals; 16 further occurrences, identical apart from timestamps, from 03:56:33.668450 through 03:56:35.219796 omitted ...]
Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.323128 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.323172 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.323188 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.323206 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.323219 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:35Z","lastTransitionTime":"2025-10-11T03:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.422899 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:35 crc kubenswrapper[4798]: E1011 03:56:35.423020 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.423181 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:35 crc kubenswrapper[4798]: E1011 03:56:35.423223 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.423313 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:35 crc kubenswrapper[4798]: E1011 03:56:35.423355 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.423497 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:35 crc kubenswrapper[4798]: E1011 03:56:35.423546 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.424202 4798 scope.go:117] "RemoveContainer" containerID="66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b" Oct 11 03:56:35 crc kubenswrapper[4798]: E1011 03:56:35.424321 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.426214 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.426269 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.426299 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.426329 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.426351 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:35Z","lastTransitionTime":"2025-10-11T03:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.440453 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9be402e-40bb-4e2d-be8b-349646c34f1a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6897a04b7204031c861d74f6a4d475af9e9b88220cf02bfa50e7845c85cd5f70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b88dcbb90798e97c13f7e7ef7bc7171bb1dc33cf488bd453a8d263a02093dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2fb58514dfb39402d4e10ca699975671a3c53a7fe54b374a987efa8ba401ccf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5844d6ac6f40fe8f9056708ee6ffe64a95b58a4dd50582e455860ce6e5628cd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.470965 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ec61df088ad49a0392264c33da84b7324c4aa608201d71e309401ab80013fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10697a88b117962307b632f70354acdfd493e92c2909908a31c90c754dcbc0ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919
d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.498253 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cf2b185-9a26-4448-8fc5-4885f98daf87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jdt7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5bfzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.526681 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-p6xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd9c0e44-3329-422a-907b-e9e9bb6194cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:56:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:56:13Z\\\",\\\"message\\\":\\\"2025-10-11T03:55:27+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec\\\\n2025-10-11T03:55:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_80fdfc1b-8b4c-4573-acb4-89213eeebcec to /host/opt/cni/bin/\\\\n2025-10-11T03:55:27Z [verbose] multus-daemon started\\\\n2025-10-11T03:55:27Z [verbose] Readiness Indicator file check\\\\n2025-10-11T03:56:12Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:56:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9wst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-p6xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.529029 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.529059 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.529071 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.529088 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.529130 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:35Z","lastTransitionTime":"2025-10-11T03:56:35Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.541265 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89cbb20d-9fdb-4fed-9ad2-acb1207afc46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a3655b5826d719e04f0972221ea72554a15025acffa8e2689b4b2855d6151d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f0c589033410abafbc39c6a7386343c70bb92ed0a1d7336188a44cc6e4fd65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7jvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-
10-11T03:55:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5sxhs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.555145 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acb5d285-f601-4afe-8a14-cadd3a0f0dde\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1baa4c767a0681f8f483382dbcb45c01a8d63b7e9065744e1be1267cf3d4dc36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a697e8d1614e94c59ae1935733518a6de4259ea7eddbbe686b933d3f453b0f8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a697e8d1614e94c59ae1935733518a6de4259ea7eddbbe686b933d3f453b0f8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.575037 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6762f3fe-b434-4380-a406-7d52e18a0d17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73da1fe1784a04b0babc804ccb6862856c863b9f793d96535c9ff59c5a0430f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0f1fc9aeb16230733964ca136c4fb9da847f42355e2087c563594c7420a3eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea9c51e79aa7e8f9ddfb800fb9d0cf94fb5d551210acb8e5f836ba30b7b47539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702
f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9f44877c10439bb8e546ce041f32308c83f1a547140ec66993c26ad4ee306b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5102a1352bd10f0c1e73a50d61360a7e0d6ed3a3343e19b26142c3aa692d2e5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889807d3a3e4bc895910c76d1d71dfc36ac6716878ee162c0d9aad3827a01377\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://889807d3a3e4bc895910c76d1d71dfc36ac6716878ee162c0d9aad3827a01377\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc4218c8c72ac3ee30192f5d64284678a7cf532624ccf43730d140385af91a48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\
\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc4218c8c72ac3ee30192f5d64284678a7cf532624ccf43730d140385af91a48\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://af2282a68f8ef3673cddb76e5801be2a142e2dc27fdf5c908e49318cfb13cb9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2282a68f8ef3673cddb76e5801be2a142e2dc27fdf5c908e49318cfb13cb9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.591620 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6129cc4f78a62c5fb76f4859ec954c8901954d27b7f05672e97d992e0c933191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.610957 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c2a342b-5252-4957-9e2c-8f81c304b8af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e2aa76f109bbdb0ec131c0724c460f5c1313b0
96e2fb3ebf1e42fd4832c88b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T03:56:21Z\\\",\\\"message\\\":\\\"c8-46726fd37529 0xc007364d2d 0xc007364d2e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:grpc,Protocol:TCP,Port:50051,TargetPort:{0 50051 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{olm.catalogSource: community-operators,olm.managed: true,},ClusterIP:10.217.5.189,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.5.189],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1011 03:56:21.403655 6812 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurre\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:56:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jtk6h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-svt4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.624126 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cwk9h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a97c4ffd-df63-4ce6-8903-80849a23a9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc58b162f1d2b3447282c149ec7cd6d60b4c53fa3cf51dfb4a821d115c9a2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pfvbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cwk9h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.631095 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.631115 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.631122 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.631135 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.631143 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:35Z","lastTransitionTime":"2025-10-11T03:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.642142 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.656041 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42571bc8-2186-4e3b-bba9-28f5a8f364d0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c66c40f71f0877eee058976f94625fe1c4bcab5dfad363be39e6863c2e9f07a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5gdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h28s2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.674670 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b6a179c-1d23-4cb2-91b9-d34496a74456\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bfabccc875a12072ad81dc6bf74d849c99baa004c10d213f798c363248137db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b908ba64c3d6d1d45e1cab82e69176c3c84bf88dde3e9425fc7c3b4c84685ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae38c0781a4a8367bba95648253e72409e06cdc39825b137084fa6f4d33a4cb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b663ffff34d80940880b3959c81519084b55bd5edcedbce9402d5fad1f898034\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b4df13fbe7bbb9ba49f772c2bd4bfe6a10a17bfa12bc500f0647659be7b0cdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://997fd5172ccd393f7befe4343619dd30eba136428035ac9fc71f0ec99791a53b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20198b637812738041fd2e57cdb24097b8f92af726b1ad103fee423469a27136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:31Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w7ths\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-7fk5l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.691415 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.702594 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kszgd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e999fc8d-dd8d-415f-b72b-1c6189cf18a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a92a11778001614d226c95af9dde787c78232568c3b5c073618a843f1225b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h6wm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kszgd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.718273 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.732963 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bfd670e2-5a6d-4d56-97d6-c72b5474cfe6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d79289a0b2f4d9eee836f57ba898bd329060194772e80855547f1982371b0921\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1970a648bf4ee991b9b58bfade524bee260b45f727cba5864a6545a10487b3ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a79edde80930f3aad632c14d6ad6b3df6f8449f32107a82eb906785e873e1ab7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b8e09f509a9f25922d49024be407873d79d01edb604193f9d19fcd6c89cf96d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805f894634e76814dc3071315ddd3d8ac46cdf65a12e97f4ca56b98fe53ab25a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T03:55:19Z\\\",\\\"message\\\":\\\"W1011 03:55:08.564227 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1011 03:55:08.564630 1 crypto.go:601] Generating new CA for check-endpoints-signer@1760154908 cert, and key in /tmp/serving-cert-1527351980/serving-signer.crt, /tmp/serving-cert-1527351980/serving-signer.key\\\\nI1011 03:55:08.869630 1 observer_polling.go:159] Starting file observer\\\\nW1011 03:55:08.874583 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 03:55:08.874721 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 03:55:08.878058 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1527351980/tls.crt::/tmp/serving-cert-1527351980/tls.key\\\\\\\"\\\\nF1011 03:55:19.813421 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ca7c079d81ba990c4508c024d6db7c3503be1d628477798459b32e2374ef22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://1171e7b719c532d99d1d222fc5dbc434060d5d149bd2b6b3dea02f9716816b26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T03:55:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.733497 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.733605 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.733688 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.733771 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.733844 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:35Z","lastTransitionTime":"2025-10-11T03:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.749096 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f4f8842-e378-44a9-b919-b1bf9a8df80e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://903e13c07c622087e30f21ffff2b60c31c852d8e994c23d7af6beab5a71bb873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1087e7b8de486a192155ed94497edbca0dc98e4a84a577267fb132fc48e6b142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://08a33c11cee6a48ebcf66f251111b8bd888bd40fe6b52f43a3768eac817ed59f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eff419f507fc5362e956419c6fd14dc6a161f7297b026a5ec449647051f98767\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T03:55:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.769497 4798 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T03:55:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4936cbe98df337a2b4d0d6e3effe0afd990facff65daf923c475c9108e05fef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T03:55:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T03:56:35Z is after 2025-08-24T17:21:41Z" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.836443 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.836737 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.836807 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.836875 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.836944 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:35Z","lastTransitionTime":"2025-10-11T03:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.940450 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.940499 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.940525 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.940553 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:35 crc kubenswrapper[4798]: I1011 03:56:35.940572 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:35Z","lastTransitionTime":"2025-10-11T03:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.044051 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.044101 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.044112 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.044129 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.044140 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:36Z","lastTransitionTime":"2025-10-11T03:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.146898 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.146973 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.146995 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.147025 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.147047 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:36Z","lastTransitionTime":"2025-10-11T03:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.250809 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.250903 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.250966 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.251011 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.251039 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:36Z","lastTransitionTime":"2025-10-11T03:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.354439 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.354512 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.354530 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.354555 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.354573 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:36Z","lastTransitionTime":"2025-10-11T03:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.462451 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.462507 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.462520 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.462537 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.462550 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:36Z","lastTransitionTime":"2025-10-11T03:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.565456 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.565510 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.565524 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.565543 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.565561 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:36Z","lastTransitionTime":"2025-10-11T03:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.668853 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.668911 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.668931 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.668957 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.668976 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:36Z","lastTransitionTime":"2025-10-11T03:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.772650 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.772696 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.772708 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.772724 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.772737 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:36Z","lastTransitionTime":"2025-10-11T03:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.875104 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.875148 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.875158 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.875177 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.875190 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:36Z","lastTransitionTime":"2025-10-11T03:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.978912 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.978953 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.978964 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.978979 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:36 crc kubenswrapper[4798]: I1011 03:56:36.978991 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:36Z","lastTransitionTime":"2025-10-11T03:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.081665 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.081728 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.081745 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.081767 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.081783 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:37Z","lastTransitionTime":"2025-10-11T03:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.186334 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.186485 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.186522 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.186566 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.186595 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:37Z","lastTransitionTime":"2025-10-11T03:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.290481 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.290557 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.290576 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.290606 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.290627 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:37Z","lastTransitionTime":"2025-10-11T03:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.395057 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.395117 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.395129 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.395151 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.395162 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:37Z","lastTransitionTime":"2025-10-11T03:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.422777 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:37 crc kubenswrapper[4798]: E1011 03:56:37.423058 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.423439 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:37 crc kubenswrapper[4798]: E1011 03:56:37.423613 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.423693 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:37 crc kubenswrapper[4798]: E1011 03:56:37.423872 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.424063 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:37 crc kubenswrapper[4798]: E1011 03:56:37.424193 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.498870 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.498938 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.498974 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.499009 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.499036 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:37Z","lastTransitionTime":"2025-10-11T03:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.603085 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.603181 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.603212 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.603249 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.603274 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:37Z","lastTransitionTime":"2025-10-11T03:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.706294 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.706448 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.706478 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.706515 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.706541 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:37Z","lastTransitionTime":"2025-10-11T03:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.810723 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.810800 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.810834 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.810864 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.810889 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:37Z","lastTransitionTime":"2025-10-11T03:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.915513 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.915581 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.915602 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.915637 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:37 crc kubenswrapper[4798]: I1011 03:56:37.915658 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:37Z","lastTransitionTime":"2025-10-11T03:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.018982 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.019062 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.019081 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.019114 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.019142 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:38Z","lastTransitionTime":"2025-10-11T03:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.123038 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.123110 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.123137 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.123168 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.123191 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:38Z","lastTransitionTime":"2025-10-11T03:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.226844 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.226914 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.226939 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.226969 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.226989 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:38Z","lastTransitionTime":"2025-10-11T03:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.330290 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.330380 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.330460 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.330496 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.330522 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:38Z","lastTransitionTime":"2025-10-11T03:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.434379 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.434494 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.434519 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.434549 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.434574 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:38Z","lastTransitionTime":"2025-10-11T03:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.538794 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.538895 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.538919 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.538957 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.538986 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:38Z","lastTransitionTime":"2025-10-11T03:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.642594 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.642696 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.642718 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.642752 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.642784 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:38Z","lastTransitionTime":"2025-10-11T03:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.746747 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.746808 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.746825 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.746850 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.746866 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:38Z","lastTransitionTime":"2025-10-11T03:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.850455 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.850490 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.850501 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.850519 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.850532 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:38Z","lastTransitionTime":"2025-10-11T03:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.954459 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.954551 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.954582 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.954616 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:38 crc kubenswrapper[4798]: I1011 03:56:38.954640 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:38Z","lastTransitionTime":"2025-10-11T03:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.058437 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.058519 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.058541 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.058572 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.058595 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:39Z","lastTransitionTime":"2025-10-11T03:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.162262 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.162338 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.162358 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.162386 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.162448 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:39Z","lastTransitionTime":"2025-10-11T03:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.266744 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.266818 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.266835 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.266864 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.266881 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:39Z","lastTransitionTime":"2025-10-11T03:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.369986 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.370073 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.370094 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.370122 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.370146 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:39Z","lastTransitionTime":"2025-10-11T03:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.423617 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.423702 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.423674 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.423797 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:39 crc kubenswrapper[4798]: E1011 03:56:39.424115 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:39 crc kubenswrapper[4798]: E1011 03:56:39.424579 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:39 crc kubenswrapper[4798]: E1011 03:56:39.424707 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:39 crc kubenswrapper[4798]: E1011 03:56:39.424795 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.472887 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.472952 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.472983 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.472999 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.473009 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:39Z","lastTransitionTime":"2025-10-11T03:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.576717 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.576779 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.576801 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.576830 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.576854 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:39Z","lastTransitionTime":"2025-10-11T03:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.680271 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.680383 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.680528 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.680573 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.680637 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:39Z","lastTransitionTime":"2025-10-11T03:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.783976 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.784033 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.784046 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.784059 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.784068 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:39Z","lastTransitionTime":"2025-10-11T03:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.887176 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.887234 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.887251 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.887272 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.887287 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:39Z","lastTransitionTime":"2025-10-11T03:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.990732 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.990784 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.990800 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.990823 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:39 crc kubenswrapper[4798]: I1011 03:56:39.990839 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:39Z","lastTransitionTime":"2025-10-11T03:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.093705 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.093764 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.093773 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.093791 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.093802 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:40Z","lastTransitionTime":"2025-10-11T03:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.196994 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.197051 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.197060 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.197080 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.197094 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:40Z","lastTransitionTime":"2025-10-11T03:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.303172 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.303241 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.303258 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.303284 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.303302 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:40Z","lastTransitionTime":"2025-10-11T03:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.407113 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.407180 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.407203 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.407232 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.407249 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:40Z","lastTransitionTime":"2025-10-11T03:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.510253 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.510342 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.510363 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.510422 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.510447 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:40Z","lastTransitionTime":"2025-10-11T03:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.613732 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.613794 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.613809 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.613839 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.613854 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:40Z","lastTransitionTime":"2025-10-11T03:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.717302 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.717370 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.717423 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.717458 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.717486 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:40Z","lastTransitionTime":"2025-10-11T03:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.820250 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.820308 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.820325 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.820349 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.820366 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:40Z","lastTransitionTime":"2025-10-11T03:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.923280 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.923381 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.923465 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.923508 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:40 crc kubenswrapper[4798]: I1011 03:56:40.923556 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:40Z","lastTransitionTime":"2025-10-11T03:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.026819 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.026885 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.026901 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.026928 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.026944 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:41Z","lastTransitionTime":"2025-10-11T03:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.129429 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.129466 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.129474 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.129489 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.129500 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:41Z","lastTransitionTime":"2025-10-11T03:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.233322 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.233382 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.233435 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.233463 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.233483 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:41Z","lastTransitionTime":"2025-10-11T03:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.337274 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.337339 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.337355 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.337382 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.337421 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:41Z","lastTransitionTime":"2025-10-11T03:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.423839 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.423850 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:41 crc kubenswrapper[4798]: E1011 03:56:41.424753 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.423905 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.423867 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:41 crc kubenswrapper[4798]: E1011 03:56:41.424882 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:41 crc kubenswrapper[4798]: E1011 03:56:41.425807 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:41 crc kubenswrapper[4798]: E1011 03:56:41.425963 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.441266 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.441323 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.441344 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.441370 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.441419 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:41Z","lastTransitionTime":"2025-10-11T03:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.545371 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.545491 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.545513 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.545543 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.545565 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:41Z","lastTransitionTime":"2025-10-11T03:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.649555 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.649642 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.649666 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.649696 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.649717 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:41Z","lastTransitionTime":"2025-10-11T03:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.753328 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.753441 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.753463 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.753493 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.753517 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:41Z","lastTransitionTime":"2025-10-11T03:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.856776 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.856864 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.856888 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.856922 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.856946 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:41Z","lastTransitionTime":"2025-10-11T03:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.960837 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.960910 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.960930 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.960966 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:41 crc kubenswrapper[4798]: I1011 03:56:41.960994 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:41Z","lastTransitionTime":"2025-10-11T03:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.064844 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.064925 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.064953 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.064983 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.065005 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:42Z","lastTransitionTime":"2025-10-11T03:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.168778 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.168835 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.168856 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.168887 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.168908 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:42Z","lastTransitionTime":"2025-10-11T03:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.272502 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.272590 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.272614 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.272652 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.272679 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:42Z","lastTransitionTime":"2025-10-11T03:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.376768 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.376839 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.376868 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.376898 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.376920 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:42Z","lastTransitionTime":"2025-10-11T03:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.480543 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.480589 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.480606 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.480629 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.480647 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:42Z","lastTransitionTime":"2025-10-11T03:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.584498 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.584576 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.584597 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.584627 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.584652 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:42Z","lastTransitionTime":"2025-10-11T03:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.688769 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.688845 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.688864 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.688895 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.688915 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:42Z","lastTransitionTime":"2025-10-11T03:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.793142 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.793226 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.793247 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.793281 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.793304 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:42Z","lastTransitionTime":"2025-10-11T03:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.897293 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.897365 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.897385 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.897450 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:42 crc kubenswrapper[4798]: I1011 03:56:42.897482 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:42Z","lastTransitionTime":"2025-10-11T03:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.001335 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.001582 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.001615 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.001660 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.001691 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:43Z","lastTransitionTime":"2025-10-11T03:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.105461 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.105533 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.105559 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.105593 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.105621 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:43Z","lastTransitionTime":"2025-10-11T03:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.208754 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.208818 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.208834 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.208855 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.208870 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:43Z","lastTransitionTime":"2025-10-11T03:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.313446 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.313500 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.313515 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.313538 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.313554 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:43Z","lastTransitionTime":"2025-10-11T03:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.417131 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.417209 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.417235 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.417273 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.417299 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:43Z","lastTransitionTime":"2025-10-11T03:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.423722 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.423772 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.423826 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:43 crc kubenswrapper[4798]: E1011 03:56:43.423950 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:43 crc kubenswrapper[4798]: E1011 03:56:43.424107 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.424132 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:43 crc kubenswrapper[4798]: E1011 03:56:43.424257 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:43 crc kubenswrapper[4798]: E1011 03:56:43.424434 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.444088 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.444179 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.444201 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.444234 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.444264 4798 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T03:56:43Z","lastTransitionTime":"2025-10-11T03:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.492605 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:43 crc kubenswrapper[4798]: E1011 03:56:43.492832 4798 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 03:56:43 crc kubenswrapper[4798]: E1011 03:56:43.492934 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs podName:3cf2b185-9a26-4448-8fc5-4885f98daf87 nodeName:}" failed. No retries permitted until 2025-10-11 03:57:47.492900021 +0000 UTC m=+162.829189737 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs") pod "network-metrics-daemon-5bfzt" (UID: "3cf2b185-9a26-4448-8fc5-4885f98daf87") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.527907 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v"] Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.528633 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.531109 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.532484 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.532621 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.533006 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.593222 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.593299 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.593331 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.593371 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.593431 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-service-ca\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.606125 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podStartSLOduration=78.606106798 podStartE2EDuration="1m18.606106798s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-10-11 03:56:43.581864139 +0000 UTC m=+98.918153855" watchObservedRunningTime="2025-10-11 03:56:43.606106798 +0000 UTC m=+98.942396484" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.643377 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-7fk5l" podStartSLOduration=78.643346725 podStartE2EDuration="1m18.643346725s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:56:43.606717052 +0000 UTC m=+98.943006738" watchObservedRunningTime="2025-10-11 03:56:43.643346725 +0000 UTC m=+98.979636421" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.655880 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-cwk9h" podStartSLOduration=78.655856313 podStartE2EDuration="1m18.655856313s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:56:43.65573404 +0000 UTC m=+98.992023746" watchObservedRunningTime="2025-10-11 03:56:43.655856313 +0000 UTC m=+98.992146009" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.694206 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.694274 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-service-ca\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.694324 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.694365 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.694420 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.694523 4798 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.694571 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.696865 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-service-ca\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.697442 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=78.697374599 podStartE2EDuration="1m18.697374599s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:56:43.696694473 +0000 UTC m=+99.032984149" watchObservedRunningTime="2025-10-11 03:56:43.697374599 +0000 UTC m=+99.033664325" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.697828 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=78.697815549 podStartE2EDuration="1m18.697815549s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:56:43.675494665 +0000 UTC m=+99.011784361" watchObservedRunningTime="2025-10-11 03:56:43.697815549 +0000 UTC m=+99.034105275" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.707885 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.719969 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7ed0a19c-70d2-45f0-b764-7a6bcc03f736-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-k622v\" (UID: \"7ed0a19c-70d2-45f0-b764-7a6bcc03f736\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.761443 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-kszgd" podStartSLOduration=78.761365033 podStartE2EDuration="1m18.761365033s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:56:43.744043444 +0000 UTC m=+99.080333170" watchObservedRunningTime="2025-10-11 03:56:43.761365033 +0000 UTC m=+99.097654759" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.772457 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=50.772428868 podStartE2EDuration="50.772428868s" podCreationTimestamp="2025-10-11 03:55:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:56:43.772092599 +0000 UTC m=+99.108382285" watchObservedRunningTime="2025-10-11 03:56:43.772428868 +0000 UTC m=+99.108718554" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.835476 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=10.835451999 podStartE2EDuration="10.835451999s" podCreationTimestamp="2025-10-11 03:56:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:56:43.835360726 +0000 UTC m=+99.171650412" watchObservedRunningTime="2025-10-11 03:56:43.835451999 +0000 UTC m=+99.171741705" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.857046 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.865251 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=11.865216914 podStartE2EDuration="11.865216914s" podCreationTimestamp="2025-10-11 03:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:56:43.864724682 +0000 UTC m=+99.201014388" watchObservedRunningTime="2025-10-11 03:56:43.865216914 +0000 UTC m=+99.201506630" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.921152 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-p6xdd" podStartSLOduration=78.921128821 podStartE2EDuration="1m18.921128821s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:56:43.906961555 +0000 UTC m=+99.243251291" watchObservedRunningTime="2025-10-11 03:56:43.921128821 +0000 UTC m=+99.257418507" Oct 11 03:56:43 crc kubenswrapper[4798]: I1011 03:56:43.921308 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5sxhs" podStartSLOduration=78.921304435 podStartE2EDuration="1m18.921304435s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:56:43.920796024 +0000 UTC m=+99.257085730" watchObservedRunningTime="2025-10-11 03:56:43.921304435 +0000 UTC m=+99.257594121" Oct 11 03:56:44 crc kubenswrapper[4798]: I1011 03:56:44.021925 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" 
event={"ID":"7ed0a19c-70d2-45f0-b764-7a6bcc03f736","Type":"ContainerStarted","Data":"e562a53dabfb329bacea2b2b1e92f93105a8b5ee7d6ef480411733dc22ac5c34"} Oct 11 03:56:44 crc kubenswrapper[4798]: I1011 03:56:44.022000 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" event={"ID":"7ed0a19c-70d2-45f0-b764-7a6bcc03f736","Type":"ContainerStarted","Data":"78eac0a12e1d633eaa109437b5e9161c1b8ac1f9d6406ef73b97d6b9deab82f9"} Oct 11 03:56:44 crc kubenswrapper[4798]: I1011 03:56:44.040913 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k622v" podStartSLOduration=79.040878938 podStartE2EDuration="1m19.040878938s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:56:44.0396387 +0000 UTC m=+99.375928436" watchObservedRunningTime="2025-10-11 03:56:44.040878938 +0000 UTC m=+99.377168664" Oct 11 03:56:45 crc kubenswrapper[4798]: I1011 03:56:45.423487 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:45 crc kubenswrapper[4798]: I1011 03:56:45.423512 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:45 crc kubenswrapper[4798]: I1011 03:56:45.423645 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:45 crc kubenswrapper[4798]: I1011 03:56:45.423723 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:45 crc kubenswrapper[4798]: E1011 03:56:45.426767 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:45 crc kubenswrapper[4798]: E1011 03:56:45.426842 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:45 crc kubenswrapper[4798]: E1011 03:56:45.427176 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:45 crc kubenswrapper[4798]: E1011 03:56:45.427101 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:47 crc kubenswrapper[4798]: I1011 03:56:47.423694 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:47 crc kubenswrapper[4798]: I1011 03:56:47.423807 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:47 crc kubenswrapper[4798]: I1011 03:56:47.423815 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:47 crc kubenswrapper[4798]: I1011 03:56:47.425263 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:47 crc kubenswrapper[4798]: E1011 03:56:47.425488 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:47 crc kubenswrapper[4798]: E1011 03:56:47.425622 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:47 crc kubenswrapper[4798]: E1011 03:56:47.425819 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:47 crc kubenswrapper[4798]: E1011 03:56:47.425937 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:47 crc kubenswrapper[4798]: I1011 03:56:47.426090 4798 scope.go:117] "RemoveContainer" containerID="66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b" Oct 11 03:56:47 crc kubenswrapper[4798]: E1011 03:56:47.426542 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" Oct 11 03:56:49 crc kubenswrapper[4798]: I1011 03:56:49.423089 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:49 crc kubenswrapper[4798]: I1011 03:56:49.423174 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:49 crc kubenswrapper[4798]: I1011 03:56:49.423289 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:49 crc kubenswrapper[4798]: E1011 03:56:49.423484 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:49 crc kubenswrapper[4798]: I1011 03:56:49.423810 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:49 crc kubenswrapper[4798]: E1011 03:56:49.423880 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:49 crc kubenswrapper[4798]: E1011 03:56:49.424042 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:49 crc kubenswrapper[4798]: E1011 03:56:49.424191 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:51 crc kubenswrapper[4798]: I1011 03:56:51.422685 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:51 crc kubenswrapper[4798]: E1011 03:56:51.422895 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:51 crc kubenswrapper[4798]: I1011 03:56:51.423225 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:51 crc kubenswrapper[4798]: E1011 03:56:51.423350 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:51 crc kubenswrapper[4798]: I1011 03:56:51.423671 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:51 crc kubenswrapper[4798]: E1011 03:56:51.423841 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:51 crc kubenswrapper[4798]: I1011 03:56:51.423939 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:51 crc kubenswrapper[4798]: E1011 03:56:51.424170 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:53 crc kubenswrapper[4798]: I1011 03:56:53.422698 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:53 crc kubenswrapper[4798]: I1011 03:56:53.422808 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:53 crc kubenswrapper[4798]: I1011 03:56:53.422861 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:53 crc kubenswrapper[4798]: I1011 03:56:53.423173 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:53 crc kubenswrapper[4798]: E1011 03:56:53.423239 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:53 crc kubenswrapper[4798]: E1011 03:56:53.423346 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:53 crc kubenswrapper[4798]: E1011 03:56:53.427890 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:53 crc kubenswrapper[4798]: E1011 03:56:53.428937 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:55 crc kubenswrapper[4798]: I1011 03:56:55.422707 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:55 crc kubenswrapper[4798]: I1011 03:56:55.422794 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:55 crc kubenswrapper[4798]: I1011 03:56:55.422727 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:55 crc kubenswrapper[4798]: I1011 03:56:55.422872 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:55 crc kubenswrapper[4798]: E1011 03:56:55.423026 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:55 crc kubenswrapper[4798]: E1011 03:56:55.423160 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:55 crc kubenswrapper[4798]: E1011 03:56:55.423272 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:55 crc kubenswrapper[4798]: E1011 03:56:55.423372 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:57 crc kubenswrapper[4798]: I1011 03:56:57.422831 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:57 crc kubenswrapper[4798]: I1011 03:56:57.422930 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:57 crc kubenswrapper[4798]: I1011 03:56:57.423585 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:57 crc kubenswrapper[4798]: E1011 03:56:57.423831 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:57 crc kubenswrapper[4798]: I1011 03:56:57.423889 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:57 crc kubenswrapper[4798]: E1011 03:56:57.424170 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:57 crc kubenswrapper[4798]: E1011 03:56:57.424257 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:57 crc kubenswrapper[4798]: E1011 03:56:57.424123 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:59 crc kubenswrapper[4798]: I1011 03:56:59.423132 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:56:59 crc kubenswrapper[4798]: I1011 03:56:59.423318 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:56:59 crc kubenswrapper[4798]: I1011 03:56:59.423361 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:56:59 crc kubenswrapper[4798]: I1011 03:56:59.423435 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:56:59 crc kubenswrapper[4798]: E1011 03:56:59.423599 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:56:59 crc kubenswrapper[4798]: E1011 03:56:59.423770 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:56:59 crc kubenswrapper[4798]: E1011 03:56:59.424695 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:56:59 crc kubenswrapper[4798]: E1011 03:56:59.424872 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:56:59 crc kubenswrapper[4798]: I1011 03:56:59.425291 4798 scope.go:117] "RemoveContainer" containerID="66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b" Oct 11 03:56:59 crc kubenswrapper[4798]: E1011 03:56:59.425673 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-svt4z_openshift-ovn-kubernetes(3c2a342b-5252-4957-9e2c-8f81c304b8af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" Oct 11 03:57:00 crc kubenswrapper[4798]: I1011 03:57:00.085806 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-p6xdd_bd9c0e44-3329-422a-907b-e9e9bb6194cc/kube-multus/1.log" Oct 11 03:57:00 crc kubenswrapper[4798]: I1011 03:57:00.086886 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-p6xdd_bd9c0e44-3329-422a-907b-e9e9bb6194cc/kube-multus/0.log" Oct 11 03:57:00 crc kubenswrapper[4798]: I1011 03:57:00.087015 4798 generic.go:334] "Generic (PLEG): container finished" podID="bd9c0e44-3329-422a-907b-e9e9bb6194cc" containerID="b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a" exitCode=1 Oct 11 03:57:00 crc kubenswrapper[4798]: I1011 03:57:00.087092 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-p6xdd" event={"ID":"bd9c0e44-3329-422a-907b-e9e9bb6194cc","Type":"ContainerDied","Data":"b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a"} Oct 11 03:57:00 crc kubenswrapper[4798]: I1011 03:57:00.087175 4798 scope.go:117] "RemoveContainer" containerID="a46e4bfff29af67bd6304e68df8fb4aa7e2184648b85398f453fde878b27d825" Oct 11 03:57:00 crc kubenswrapper[4798]: I1011 03:57:00.087874 4798 scope.go:117] "RemoveContainer" containerID="b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a" Oct 11 03:57:00 crc kubenswrapper[4798]: E1011 03:57:00.088194 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-p6xdd_openshift-multus(bd9c0e44-3329-422a-907b-e9e9bb6194cc)\"" pod="openshift-multus/multus-p6xdd" podUID="bd9c0e44-3329-422a-907b-e9e9bb6194cc" Oct 11 03:57:01 crc kubenswrapper[4798]: I1011 03:57:01.096246 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-p6xdd_bd9c0e44-3329-422a-907b-e9e9bb6194cc/kube-multus/1.log" Oct 11 03:57:01 crc kubenswrapper[4798]: I1011 03:57:01.422947 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:01 crc kubenswrapper[4798]: I1011 03:57:01.423007 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:01 crc kubenswrapper[4798]: I1011 03:57:01.423049 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:01 crc kubenswrapper[4798]: E1011 03:57:01.423194 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:57:01 crc kubenswrapper[4798]: I1011 03:57:01.423216 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:01 crc kubenswrapper[4798]: E1011 03:57:01.423303 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:57:01 crc kubenswrapper[4798]: E1011 03:57:01.423385 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:57:01 crc kubenswrapper[4798]: E1011 03:57:01.423642 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:57:03 crc kubenswrapper[4798]: I1011 03:57:03.422818 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:03 crc kubenswrapper[4798]: I1011 03:57:03.422885 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:03 crc kubenswrapper[4798]: I1011 03:57:03.422899 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:03 crc kubenswrapper[4798]: E1011 03:57:03.423088 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:57:03 crc kubenswrapper[4798]: I1011 03:57:03.423129 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:03 crc kubenswrapper[4798]: E1011 03:57:03.423303 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:57:03 crc kubenswrapper[4798]: E1011 03:57:03.423552 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:57:03 crc kubenswrapper[4798]: E1011 03:57:03.423990 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:57:05 crc kubenswrapper[4798]: E1011 03:57:05.412372 4798 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Oct 11 03:57:05 crc kubenswrapper[4798]: I1011 03:57:05.423482 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:05 crc kubenswrapper[4798]: I1011 03:57:05.423609 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:05 crc kubenswrapper[4798]: I1011 03:57:05.423658 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:05 crc kubenswrapper[4798]: I1011 03:57:05.423856 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:05 crc kubenswrapper[4798]: E1011 03:57:05.424685 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:57:05 crc kubenswrapper[4798]: E1011 03:57:05.425227 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:57:05 crc kubenswrapper[4798]: E1011 03:57:05.425347 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:57:05 crc kubenswrapper[4798]: E1011 03:57:05.425627 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:57:05 crc kubenswrapper[4798]: E1011 03:57:05.511675 4798 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Oct 11 03:57:07 crc kubenswrapper[4798]: I1011 03:57:07.423118 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:07 crc kubenswrapper[4798]: I1011 03:57:07.423232 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:07 crc kubenswrapper[4798]: I1011 03:57:07.423264 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:07 crc kubenswrapper[4798]: E1011 03:57:07.423320 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:57:07 crc kubenswrapper[4798]: I1011 03:57:07.423424 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:07 crc kubenswrapper[4798]: E1011 03:57:07.423646 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:57:07 crc kubenswrapper[4798]: E1011 03:57:07.423742 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:57:07 crc kubenswrapper[4798]: E1011 03:57:07.423806 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:57:09 crc kubenswrapper[4798]: I1011 03:57:09.422738 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:09 crc kubenswrapper[4798]: I1011 03:57:09.422793 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:09 crc kubenswrapper[4798]: I1011 03:57:09.422743 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:09 crc kubenswrapper[4798]: E1011 03:57:09.422920 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:57:09 crc kubenswrapper[4798]: I1011 03:57:09.422805 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:09 crc kubenswrapper[4798]: E1011 03:57:09.423028 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:57:09 crc kubenswrapper[4798]: E1011 03:57:09.423234 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:57:09 crc kubenswrapper[4798]: E1011 03:57:09.423480 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:57:10 crc kubenswrapper[4798]: E1011 03:57:10.513772 4798 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Oct 11 03:57:11 crc kubenswrapper[4798]: I1011 03:57:11.423281 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:11 crc kubenswrapper[4798]: I1011 03:57:11.423456 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:11 crc kubenswrapper[4798]: I1011 03:57:11.423501 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:11 crc kubenswrapper[4798]: I1011 03:57:11.423584 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:11 crc kubenswrapper[4798]: E1011 03:57:11.424145 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:57:11 crc kubenswrapper[4798]: E1011 03:57:11.424286 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:57:11 crc kubenswrapper[4798]: E1011 03:57:11.424493 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:57:11 crc kubenswrapper[4798]: E1011 03:57:11.424546 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:57:13 crc kubenswrapper[4798]: I1011 03:57:13.423368 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:13 crc kubenswrapper[4798]: I1011 03:57:13.423530 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:13 crc kubenswrapper[4798]: I1011 03:57:13.423760 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:13 crc kubenswrapper[4798]: E1011 03:57:13.423761 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:57:13 crc kubenswrapper[4798]: I1011 03:57:13.424263 4798 scope.go:117] "RemoveContainer" containerID="b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a" Oct 11 03:57:13 crc kubenswrapper[4798]: I1011 03:57:13.424541 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:13 crc kubenswrapper[4798]: E1011 03:57:13.424678 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:57:13 crc kubenswrapper[4798]: E1011 03:57:13.424822 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:57:13 crc kubenswrapper[4798]: E1011 03:57:13.424936 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:57:14 crc kubenswrapper[4798]: I1011 03:57:14.143859 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-p6xdd_bd9c0e44-3329-422a-907b-e9e9bb6194cc/kube-multus/1.log" Oct 11 03:57:14 crc kubenswrapper[4798]: I1011 03:57:14.144255 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-p6xdd" event={"ID":"bd9c0e44-3329-422a-907b-e9e9bb6194cc","Type":"ContainerStarted","Data":"23e0ec8a9881a6c50cee2b2d1a8a8ee8cff0b43249558238022f9310a865b8a2"} Oct 11 03:57:14 crc kubenswrapper[4798]: I1011 03:57:14.423906 4798 scope.go:117] "RemoveContainer" containerID="66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b" Oct 11 03:57:15 crc kubenswrapper[4798]: I1011 03:57:15.150761 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/3.log" Oct 11 03:57:15 crc kubenswrapper[4798]: I1011 03:57:15.154025 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerStarted","Data":"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"} Oct 11 03:57:15 crc kubenswrapper[4798]: I1011 03:57:15.154555 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 03:57:15 crc kubenswrapper[4798]: I1011 03:57:15.183757 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podStartSLOduration=110.183734433 podStartE2EDuration="1m50.183734433s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:15.182681538 +0000 UTC m=+130.518971234" watchObservedRunningTime="2025-10-11 03:57:15.183734433 +0000 UTC m=+130.520024119" Oct 11 03:57:15 crc kubenswrapper[4798]: I1011 03:57:15.191370 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-5bfzt"] Oct 11 03:57:15 crc kubenswrapper[4798]: I1011 03:57:15.191569 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:15 crc kubenswrapper[4798]: E1011 03:57:15.191691 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:57:15 crc kubenswrapper[4798]: I1011 03:57:15.426954 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:15 crc kubenswrapper[4798]: E1011 03:57:15.427059 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:57:15 crc kubenswrapper[4798]: I1011 03:57:15.427287 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:15 crc kubenswrapper[4798]: E1011 03:57:15.427353 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:57:15 crc kubenswrapper[4798]: I1011 03:57:15.427507 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:15 crc kubenswrapper[4798]: E1011 03:57:15.427563 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:57:15 crc kubenswrapper[4798]: E1011 03:57:15.514602 4798 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Oct 11 03:57:16 crc kubenswrapper[4798]: I1011 03:57:16.423329 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:16 crc kubenswrapper[4798]: E1011 03:57:16.423520 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:57:17 crc kubenswrapper[4798]: I1011 03:57:17.422779 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:17 crc kubenswrapper[4798]: I1011 03:57:17.422839 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:17 crc kubenswrapper[4798]: I1011 03:57:17.422882 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:17 crc kubenswrapper[4798]: E1011 03:57:17.423002 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:57:17 crc kubenswrapper[4798]: E1011 03:57:17.423172 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:57:17 crc kubenswrapper[4798]: E1011 03:57:17.423291 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:57:18 crc kubenswrapper[4798]: I1011 03:57:18.423254 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:18 crc kubenswrapper[4798]: E1011 03:57:18.423471 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:57:19 crc kubenswrapper[4798]: I1011 03:57:19.423037 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:19 crc kubenswrapper[4798]: I1011 03:57:19.423131 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:19 crc kubenswrapper[4798]: E1011 03:57:19.423559 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 03:57:19 crc kubenswrapper[4798]: E1011 03:57:19.423646 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 03:57:19 crc kubenswrapper[4798]: I1011 03:57:19.423133 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:19 crc kubenswrapper[4798]: E1011 03:57:19.423743 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 03:57:20 crc kubenswrapper[4798]: I1011 03:57:20.422826 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:20 crc kubenswrapper[4798]: E1011 03:57:20.423042 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5bfzt" podUID="3cf2b185-9a26-4448-8fc5-4885f98daf87" Oct 11 03:57:21 crc kubenswrapper[4798]: I1011 03:57:21.422883 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:21 crc kubenswrapper[4798]: I1011 03:57:21.422916 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:21 crc kubenswrapper[4798]: I1011 03:57:21.423178 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:21 crc kubenswrapper[4798]: I1011 03:57:21.425532 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Oct 11 03:57:21 crc kubenswrapper[4798]: I1011 03:57:21.426804 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Oct 11 03:57:21 crc kubenswrapper[4798]: I1011 03:57:21.426876 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Oct 11 03:57:21 crc kubenswrapper[4798]: I1011 03:57:21.426930 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Oct 11 03:57:22 crc kubenswrapper[4798]: I1011 03:57:22.423629 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:22 crc kubenswrapper[4798]: I1011 03:57:22.426171 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Oct 11 03:57:22 crc kubenswrapper[4798]: I1011 03:57:22.427658 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.376457 4798 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.421115 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-db49p"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.421912 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.424808 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.428474 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.429147 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.429225 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.429551 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.433463 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wqjfc"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.433850 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.434022 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.434025 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.434100 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.434323 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.434349 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.435975 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.436713 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.441377 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.445517 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.446094 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.447977 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-5lpj9"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.448568 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4wcbm"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.448946 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.449636 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.451970 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.452664 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.461963 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-kw978"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.462943 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.467285 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.467663 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.467819 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.467944 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.468060 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.468166 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.468238 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.468362 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.468440 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.468490 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.468615 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.468667 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.468733 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.468073 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.468832 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.468837 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.469424 4798 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-cluster-machine-approver"/"machine-approver-config" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.469576 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.469864 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.470023 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.470172 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.470316 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.475579 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.475823 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.475864 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.476048 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.476162 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.476428 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.476448 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.476468 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.476516 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.476566 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.476579 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.476701 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.476782 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.476832 4798 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.476933 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.486930 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-k8p2d"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.495264 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.506916 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-nptjh"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.507263 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.507628 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.507726 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-k8p2d" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.507774 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.508543 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.508789 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.507733 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.509058 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.509206 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.509430 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.509509 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.509767 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-qv29k"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.510257 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-qv29k" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.510690 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.510977 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.511135 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.513723 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.514010 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.514181 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.515102 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.515483 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.515578 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.515681 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.515955 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.516030 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.516059 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.516120 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.516185 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.516430 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.519454 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.519719 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.519871 4798 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-dns-operator"/"metrics-tls" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.520229 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.520467 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.521643 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.521780 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.526614 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.542095 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.543841 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.547728 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548289 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-service-ca-bundle\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548352 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wqdz\" (UniqueName: \"kubernetes.io/projected/467fda03-7ce6-41c9-967c-6e782bd68f90-kube-api-access-9wqdz\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548379 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2211eec-aa3e-429f-9237-fc13fad065ed-config\") pod \"kube-apiserver-operator-766d6c64bb-mdkrr\" (UID: \"e2211eec-aa3e-429f-9237-fc13fad065ed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548418 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxxvx\" (UID: \"11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548438 4798 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ae1d839-823d-4694-9159-b32ea65ca9d5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-ndqkx\" (UID: \"4ae1d839-823d-4694-9159-b32ea65ca9d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548456 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548494 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548518 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548535 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sk8z8\" (UniqueName: \"kubernetes.io/projected/9398ffbc-b6a8-4294-83d3-1c80dd716113-kube-api-access-sk8z8\") pod \"console-operator-58897d9998-k8p2d\" (UID: \"9398ffbc-b6a8-4294-83d3-1c80dd716113\") " pod="openshift-console-operator/console-operator-58897d9998-k8p2d" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548570 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e2211eec-aa3e-429f-9237-fc13fad065ed-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-mdkrr\" (UID: \"e2211eec-aa3e-429f-9237-fc13fad065ed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548589 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxxvx\" (UID: \"11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548620 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-trusted-ca-bundle\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc 
kubenswrapper[4798]: I1011 03:57:24.548662 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/29c4d9ea-078d-4ed3-a56a-c0a29887b6a1-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wqjfc\" (UID: \"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548682 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/bf88f9b3-812c-45d1-b8a1-58abe81c9d51-machine-approver-tls\") pod \"machine-approver-56656f9798-vb9ln\" (UID: \"bf88f9b3-812c-45d1-b8a1-58abe81c9d51\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548702 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-image-import-ca\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548738 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e2211eec-aa3e-429f-9237-fc13fad065ed-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-mdkrr\" (UID: \"e2211eec-aa3e-429f-9237-fc13fad065ed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548757 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mndjz\" (UniqueName: \"kubernetes.io/projected/46ccafd1-b791-4e38-8d42-13aebe2909b2-kube-api-access-mndjz\") pod \"openshift-config-operator-7777fb866f-lhsx5\" (UID: \"46ccafd1-b791-4e38-8d42-13aebe2909b2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548779 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-config\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548791 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-db49p"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548810 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548824 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-kgrgq"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548831 4798 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pcfl\" (UniqueName: \"kubernetes.io/projected/bf88f9b3-812c-45d1-b8a1-58abe81c9d51-kube-api-access-8pcfl\") pod \"machine-approver-56656f9798-vb9ln\" (UID: \"bf88f9b3-812c-45d1-b8a1-58abe81c9d51\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548849 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/467fda03-7ce6-41c9-967c-6e782bd68f90-serving-cert\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548890 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzfhf\" (UniqueName: \"kubernetes.io/projected/f196154c-0442-4600-967f-74eb36876b52-kube-api-access-kzfhf\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548908 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-config\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548925 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/6c3ed6b4-3ffd-462d-b465-f509ef85858b-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-zrx8s\" (UID: \"6c3ed6b4-3ffd-462d-b465-f509ef85858b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548961 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.548980 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf88f9b3-812c-45d1-b8a1-58abe81c9d51-config\") pod \"machine-approver-56656f9798-vb9ln\" (UID: \"bf88f9b3-812c-45d1-b8a1-58abe81c9d51\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.549000 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/467fda03-7ce6-41c9-967c-6e782bd68f90-encryption-config\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.549020 4798 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5tj6\" (UniqueName: \"kubernetes.io/projected/29c4d9ea-078d-4ed3-a56a-c0a29887b6a1-kube-api-access-p5tj6\") pod \"machine-api-operator-5694c8668f-wqjfc\" (UID: \"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.549056 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bf88f9b3-812c-45d1-b8a1-58abe81c9d51-auth-proxy-config\") pod \"machine-approver-56656f9798-vb9ln\" (UID: \"bf88f9b3-812c-45d1-b8a1-58abe81c9d51\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.549073 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f196154c-0442-4600-967f-74eb36876b52-encryption-config\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.549089 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/806f3022-04e6-4246-a1e3-e9f72066aed3-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8jgvs\" (UID: \"806f3022-04e6-4246-a1e3-e9f72066aed3\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.549123 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bdw6s"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.549528 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.549572 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-kgrgq" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.550484 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.553116 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.553542 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.549124 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46ccafd1-b791-4e38-8d42-13aebe2909b2-serving-cert\") pod \"openshift-config-operator-7777fb866f-lhsx5\" (UID: \"46ccafd1-b791-4e38-8d42-13aebe2909b2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.553569 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.553697 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.553782 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-serving-cert\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.553817 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hs4f\" (UniqueName: \"kubernetes.io/projected/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-kube-api-access-6hs4f\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.553857 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frdtx\" (UniqueName: \"kubernetes.io/projected/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-kube-api-access-frdtx\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.553878 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f196154c-0442-4600-967f-74eb36876b52-etcd-client\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.553899 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-etcd-serving-ca\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.553923 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/9398ffbc-b6a8-4294-83d3-1c80dd716113-config\") pod \"console-operator-58897d9998-k8p2d\" (UID: \"9398ffbc-b6a8-4294-83d3-1c80dd716113\") " pod="openshift-console-operator/console-operator-58897d9998-k8p2d" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.553943 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9398ffbc-b6a8-4294-83d3-1c80dd716113-serving-cert\") pod \"console-operator-58897d9998-k8p2d\" (UID: \"9398ffbc-b6a8-4294-83d3-1c80dd716113\") " pod="openshift-console-operator/console-operator-58897d9998-k8p2d" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.553965 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.553989 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554136 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/f196154c-0442-4600-967f-74eb36876b52-node-pullsecrets\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554181 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554237 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/467fda03-7ce6-41c9-967c-6e782bd68f90-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554265 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554308 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554333 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ae1d839-823d-4694-9159-b32ea65ca9d5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-ndqkx\" (UID: \"4ae1d839-823d-4694-9159-b32ea65ca9d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554522 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-audit\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554570 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pjjx\" (UniqueName: \"kubernetes.io/projected/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-kube-api-access-4pjjx\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554597 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-serving-cert\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554627 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/467fda03-7ce6-41c9-967c-6e782bd68f90-audit-policies\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554648 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/467fda03-7ce6-41c9-967c-6e782bd68f90-etcd-client\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554680 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-oauth-config\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554705 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29c4d9ea-078d-4ed3-a56a-c0a29887b6a1-config\") pod \"machine-api-operator-5694c8668f-wqjfc\" (UID: 
\"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554771 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/46ccafd1-b791-4e38-8d42-13aebe2909b2-available-featuregates\") pod \"openshift-config-operator-7777fb866f-lhsx5\" (UID: \"46ccafd1-b791-4e38-8d42-13aebe2909b2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554780 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554804 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-oauth-serving-cert\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554826 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/29c4d9ea-078d-4ed3-a56a-c0a29887b6a1-images\") pod \"machine-api-operator-5694c8668f-wqjfc\" (UID: \"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554918 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvqph\" (UniqueName: \"kubernetes.io/projected/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-kube-api-access-tvqph\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.554948 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f196154c-0442-4600-967f-74eb36876b52-serving-cert\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555091 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555122 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6c3ed6b4-3ffd-462d-b465-f509ef85858b-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-zrx8s\" (UID: \"6c3ed6b4-3ffd-462d-b465-f509ef85858b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555273 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555515 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-audit-policies\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555593 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-config\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555610 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555622 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/467fda03-7ce6-41c9-967c-6e782bd68f90-audit-dir\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555635 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555666 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkrbf\" (UniqueName: \"kubernetes.io/projected/6c3ed6b4-3ffd-462d-b465-f509ef85858b-kube-api-access-mkrbf\") pod \"cluster-image-registry-operator-dc59b4c8b-zrx8s\" (UID: \"6c3ed6b4-3ffd-462d-b465-f509ef85858b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555688 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555761 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555852 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555927 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555692 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxxvx\" (UID: \"11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.555966 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556027 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"service-ca\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-service-ca\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556058 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-trusted-ca-bundle\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556084 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6c3ed6b4-3ffd-462d-b465-f509ef85858b-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-zrx8s\" (UID: \"6c3ed6b4-3ffd-462d-b465-f509ef85858b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556131 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-serving-cert\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556142 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556152 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556160 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f196154c-0442-4600-967f-74eb36876b52-audit-dir\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556184 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556210 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556257 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9398ffbc-b6a8-4294-83d3-1c80dd716113-trusted-ca\") pod 
\"console-operator-58897d9998-k8p2d\" (UID: \"9398ffbc-b6a8-4294-83d3-1c80dd716113\") " pod="openshift-console-operator/console-operator-58897d9998-k8p2d" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556269 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556286 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-config\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556308 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-audit-dir\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.556332 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49z9v\" (UniqueName: \"kubernetes.io/projected/806f3022-04e6-4246-a1e3-e9f72066aed3-kube-api-access-49z9v\") pod \"cluster-samples-operator-665b6dd947-8jgvs\" (UID: \"806f3022-04e6-4246-a1e3-e9f72066aed3\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.563796 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.564761 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.566300 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-g2k9v"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.567257 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-g2k9v" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.569781 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.572240 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.572921 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.587948 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.588511 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.591275 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.591483 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.591708 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.591836 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrfgj\" (UniqueName: \"kubernetes.io/projected/4ae1d839-823d-4694-9159-b32ea65ca9d5-kube-api-access-xrfgj\") pod \"openshift-apiserver-operator-796bbdcf4f-ndqkx\" (UID: \"4ae1d839-823d-4694-9159-b32ea65ca9d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.591981 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.591982 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-client-ca\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.592096 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/467fda03-7ce6-41c9-967c-6e782bd68f90-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.597227 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-g52t6"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.598038 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-g52t6" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.598785 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.598838 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.599982 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.600373 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.601699 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.602863 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.603838 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.604046 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xv9zg"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.604736 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.605062 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.605420 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.606350 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.607195 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.607454 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.608351 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.609273 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-jsvs2"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.609843 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-jsvs2" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.610223 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.610629 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.611167 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-dp4kr"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.611689 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.613507 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.614127 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.614299 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.614738 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.616297 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.616899 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.616936 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.617353 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.619308 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.619730 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.620309 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.620682 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.621359 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-48l5m"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.622033 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-48l5m" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.622163 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.622718 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.623749 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-xndjl"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.625327 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.625338 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.626322 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.626857 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.627497 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.642568 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-nptjh"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.647279 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-k8p2d"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.647816 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.653217 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4wcbm"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.657148 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-kw978"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.660478 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-rdggm"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.661275 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-rdggm" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.661722 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.663415 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-jsvs2"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.664937 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.667653 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.668654 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.670004 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-xndjl"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.671520 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-qv29k"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.672958 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.674703 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.676296 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-g2k9v"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.677676 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.678939 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wqjfc"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.683419 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5lpj9"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.684499 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.685014 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.686268 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.687997 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bdw6s"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.689441 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.691132 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-g52t6"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.692435 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-dxlvp"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693096 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5tj6\" (UniqueName: \"kubernetes.io/projected/29c4d9ea-078d-4ed3-a56a-c0a29887b6a1-kube-api-access-p5tj6\") pod \"machine-api-operator-5694c8668f-wqjfc\" (UID: \"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693141 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf88f9b3-812c-45d1-b8a1-58abe81c9d51-config\") pod \"machine-approver-56656f9798-vb9ln\" (UID: \"bf88f9b3-812c-45d1-b8a1-58abe81c9d51\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693179 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/467fda03-7ce6-41c9-967c-6e782bd68f90-encryption-config\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693208 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f196154c-0442-4600-967f-74eb36876b52-encryption-config\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693238 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/806f3022-04e6-4246-a1e3-e9f72066aed3-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8jgvs\" (UID: \"806f3022-04e6-4246-a1e3-e9f72066aed3\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693264 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46ccafd1-b791-4e38-8d42-13aebe2909b2-serving-cert\") pod \"openshift-config-operator-7777fb866f-lhsx5\" (UID: \"46ccafd1-b791-4e38-8d42-13aebe2909b2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693291 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693316 4798 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bf88f9b3-812c-45d1-b8a1-58abe81c9d51-auth-proxy-config\") pod \"machine-approver-56656f9798-vb9ln\" (UID: \"bf88f9b3-812c-45d1-b8a1-58abe81c9d51\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693340 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-serving-cert\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693363 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hs4f\" (UniqueName: \"kubernetes.io/projected/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-kube-api-access-6hs4f\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693428 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frdtx\" (UniqueName: \"kubernetes.io/projected/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-kube-api-access-frdtx\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693456 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f196154c-0442-4600-967f-74eb36876b52-etcd-client\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693482 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9398ffbc-b6a8-4294-83d3-1c80dd716113-config\") pod \"console-operator-58897d9998-k8p2d\" (UID: \"9398ffbc-b6a8-4294-83d3-1c80dd716113\") " pod="openshift-console-operator/console-operator-58897d9998-k8p2d" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693506 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9398ffbc-b6a8-4294-83d3-1c80dd716113-serving-cert\") pod \"console-operator-58897d9998-k8p2d\" (UID: \"9398ffbc-b6a8-4294-83d3-1c80dd716113\") " pod="openshift-console-operator/console-operator-58897d9998-k8p2d" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693533 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693586 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-kw978\" (UID: 
\"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693616 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-etcd-serving-ca\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693654 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/f196154c-0442-4600-967f-74eb36876b52-node-pullsecrets\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693685 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693717 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7p2m\" (UniqueName: \"kubernetes.io/projected/9bb478ba-d274-4a8f-a67d-635740a72f34-kube-api-access-t7p2m\") pod \"kube-storage-version-migrator-operator-b67b599dd-wfsmq\" (UID: \"9bb478ba-d274-4a8f-a67d-635740a72f34\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693748 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/467fda03-7ce6-41c9-967c-6e782bd68f90-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693752 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf88f9b3-812c-45d1-b8a1-58abe81c9d51-config\") pod \"machine-approver-56656f9798-vb9ln\" (UID: \"bf88f9b3-812c-45d1-b8a1-58abe81c9d51\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693775 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b193848f-5963-432c-964b-460721f701c3-metrics-tls\") pod \"dns-operator-744455d44c-qv29k\" (UID: \"b193848f-5963-432c-964b-460721f701c3\") " pod="openshift-dns-operator/dns-operator-744455d44c-qv29k" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693838 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ae1d839-823d-4694-9159-b32ea65ca9d5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-ndqkx\" (UID: \"4ae1d839-823d-4694-9159-b32ea65ca9d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 
03:57:24.693875 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693934 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693963 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-audit\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.693990 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pjjx\" (UniqueName: \"kubernetes.io/projected/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-kube-api-access-4pjjx\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694043 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/467fda03-7ce6-41c9-967c-6e782bd68f90-audit-policies\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694071 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/467fda03-7ce6-41c9-967c-6e782bd68f90-etcd-client\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694131 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-oauth-config\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694160 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29c4d9ea-078d-4ed3-a56a-c0a29887b6a1-config\") pod \"machine-api-operator-5694c8668f-wqjfc\" (UID: \"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694211 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/46ccafd1-b791-4e38-8d42-13aebe2909b2-available-featuregates\") pod \"openshift-config-operator-7777fb866f-lhsx5\" (UID: 
\"46ccafd1-b791-4e38-8d42-13aebe2909b2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694429 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-serving-cert\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694494 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-oauth-serving-cert\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694528 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/29c4d9ea-078d-4ed3-a56a-c0a29887b6a1-images\") pod \"machine-api-operator-5694c8668f-wqjfc\" (UID: \"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694582 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvqph\" (UniqueName: \"kubernetes.io/projected/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-kube-api-access-tvqph\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694614 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f196154c-0442-4600-967f-74eb36876b52-serving-cert\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694669 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6c3ed6b4-3ffd-462d-b465-f509ef85858b-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-zrx8s\" (UID: \"6c3ed6b4-3ffd-462d-b465-f509ef85858b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694700 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-audit-policies\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694728 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkrbf\" (UniqueName: \"kubernetes.io/projected/6c3ed6b4-3ffd-462d-b465-f509ef85858b-kube-api-access-mkrbf\") pod \"cluster-image-registry-operator-dc59b4c8b-zrx8s\" (UID: \"6c3ed6b4-3ffd-462d-b465-f509ef85858b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 
03:57:24.694756 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxxvx\" (UID: \"11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694786 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-config\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694817 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/467fda03-7ce6-41c9-967c-6e782bd68f90-audit-dir\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694841 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-trusted-ca-bundle\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694867 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6c3ed6b4-3ffd-462d-b465-f509ef85858b-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-zrx8s\" (UID: \"6c3ed6b4-3ffd-462d-b465-f509ef85858b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694893 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5-bound-sa-token\") pod \"ingress-operator-5b745b69d9-n9wgt\" (UID: \"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694923 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-service-ca\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694952 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-serving-cert\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.694991 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-serving-cert\") pod 
\"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695020 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695084 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ql96p\" (UniqueName: \"kubernetes.io/projected/b193848f-5963-432c-964b-460721f701c3-kube-api-access-ql96p\") pod \"dns-operator-744455d44c-qv29k\" (UID: \"b193848f-5963-432c-964b-460721f701c3\") " pod="openshift-dns-operator/dns-operator-744455d44c-qv29k" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695115 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghqn2\" (UniqueName: \"kubernetes.io/projected/687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5-kube-api-access-ghqn2\") pod \"ingress-operator-5b745b69d9-n9wgt\" (UID: \"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695189 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f196154c-0442-4600-967f-74eb36876b52-audit-dir\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695216 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9398ffbc-b6a8-4294-83d3-1c80dd716113-trusted-ca\") pod \"console-operator-58897d9998-k8p2d\" (UID: \"9398ffbc-b6a8-4294-83d3-1c80dd716113\") " pod="openshift-console-operator/console-operator-58897d9998-k8p2d" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695273 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-config\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695302 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-audit-dir\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695357 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-client-ca\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695412 4798 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49z9v\" (UniqueName: \"kubernetes.io/projected/806f3022-04e6-4246-a1e3-e9f72066aed3-kube-api-access-49z9v\") pod \"cluster-samples-operator-665b6dd947-8jgvs\" (UID: \"806f3022-04e6-4246-a1e3-e9f72066aed3\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695450 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrfgj\" (UniqueName: \"kubernetes.io/projected/4ae1d839-823d-4694-9159-b32ea65ca9d5-kube-api-access-xrfgj\") pod \"openshift-apiserver-operator-796bbdcf4f-ndqkx\" (UID: \"4ae1d839-823d-4694-9159-b32ea65ca9d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695478 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/467fda03-7ce6-41c9-967c-6e782bd68f90-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695505 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5-metrics-tls\") pod \"ingress-operator-5b745b69d9-n9wgt\" (UID: \"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695536 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-service-ca-bundle\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695567 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wqdz\" (UniqueName: \"kubernetes.io/projected/467fda03-7ce6-41c9-967c-6e782bd68f90-kube-api-access-9wqdz\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695595 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2211eec-aa3e-429f-9237-fc13fad065ed-config\") pod \"kube-apiserver-operator-766d6c64bb-mdkrr\" (UID: \"e2211eec-aa3e-429f-9237-fc13fad065ed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695621 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxxvx\" (UID: \"11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695648 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/4ae1d839-823d-4694-9159-b32ea65ca9d5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-ndqkx\" (UID: \"4ae1d839-823d-4694-9159-b32ea65ca9d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695698 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695729 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695792 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e2211eec-aa3e-429f-9237-fc13fad065ed-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-mdkrr\" (UID: \"e2211eec-aa3e-429f-9237-fc13fad065ed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695822 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxxvx\" (UID: \"11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695851 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bb478ba-d274-4a8f-a67d-635740a72f34-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-wfsmq\" (UID: \"9bb478ba-d274-4a8f-a67d-635740a72f34\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695875 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695902 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sk8z8\" (UniqueName: \"kubernetes.io/projected/9398ffbc-b6a8-4294-83d3-1c80dd716113-kube-api-access-sk8z8\") pod \"console-operator-58897d9998-k8p2d\" (UID: \"9398ffbc-b6a8-4294-83d3-1c80dd716113\") " pod="openshift-console-operator/console-operator-58897d9998-k8p2d" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695931 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-trusted-ca-bundle\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.695993 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/29c4d9ea-078d-4ed3-a56a-c0a29887b6a1-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wqjfc\" (UID: \"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696003 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-audit-dir\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696025 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-image-import-ca\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696072 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e2211eec-aa3e-429f-9237-fc13fad065ed-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-mdkrr\" (UID: \"e2211eec-aa3e-429f-9237-fc13fad065ed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696103 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mndjz\" (UniqueName: \"kubernetes.io/projected/46ccafd1-b791-4e38-8d42-13aebe2909b2-kube-api-access-mndjz\") pod \"openshift-config-operator-7777fb866f-lhsx5\" (UID: \"46ccafd1-b791-4e38-8d42-13aebe2909b2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696127 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/bf88f9b3-812c-45d1-b8a1-58abe81c9d51-machine-approver-tls\") pod \"machine-approver-56656f9798-vb9ln\" (UID: \"bf88f9b3-812c-45d1-b8a1-58abe81c9d51\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696145 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696169 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-config\") pod \"apiserver-76f77b778f-db49p\" (UID: 
\"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696190 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/467fda03-7ce6-41c9-967c-6e782bd68f90-serving-cert\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696216 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5-trusted-ca\") pod \"ingress-operator-5b745b69d9-n9wgt\" (UID: \"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696242 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pcfl\" (UniqueName: \"kubernetes.io/projected/bf88f9b3-812c-45d1-b8a1-58abe81c9d51-kube-api-access-8pcfl\") pod \"machine-approver-56656f9798-vb9ln\" (UID: \"bf88f9b3-812c-45d1-b8a1-58abe81c9d51\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696263 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzfhf\" (UniqueName: \"kubernetes.io/projected/f196154c-0442-4600-967f-74eb36876b52-kube-api-access-kzfhf\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696286 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696309 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bb478ba-d274-4a8f-a67d-635740a72f34-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-wfsmq\" (UID: \"9bb478ba-d274-4a8f-a67d-635740a72f34\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696329 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-config\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696350 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/6c3ed6b4-3ffd-462d-b465-f509ef85858b-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-zrx8s\" (UID: \"6c3ed6b4-3ffd-462d-b465-f509ef85858b\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.696975 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxxvx\" (UID: \"11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.697015 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/467fda03-7ce6-41c9-967c-6e782bd68f90-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.697374 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-image-import-ca\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.697675 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-service-ca-bundle\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.697974 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-client-ca\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.698318 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ae1d839-823d-4694-9159-b32ea65ca9d5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-ndqkx\" (UID: \"4ae1d839-823d-4694-9159-b32ea65ca9d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.698843 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f196154c-0442-4600-967f-74eb36876b52-encryption-config\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.699110 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.699210 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.699294 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xv9zg"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 
03:57:24.699379 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-dp4kr"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.699622 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.699784 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/6c3ed6b4-3ffd-462d-b465-f509ef85858b-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-zrx8s\" (UID: \"6c3ed6b4-3ffd-462d-b465-f509ef85858b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.700002 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.700421 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-config\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.700469 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/467fda03-7ce6-41c9-967c-6e782bd68f90-audit-dir\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.701037 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/806f3022-04e6-4246-a1e3-e9f72066aed3-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8jgvs\" (UID: \"806f3022-04e6-4246-a1e3-e9f72066aed3\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.701906 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-trusted-ca-bundle\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.702110 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.702254 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-audit-policies\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.702546 4798 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.702641 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46ccafd1-b791-4e38-8d42-13aebe2909b2-serving-cert\") pod \"openshift-config-operator-7777fb866f-lhsx5\" (UID: \"46ccafd1-b791-4e38-8d42-13aebe2909b2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.702732 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.702786 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.703172 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-audit\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.703429 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.703453 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29c4d9ea-078d-4ed3-a56a-c0a29887b6a1-config\") pod \"machine-api-operator-5694c8668f-wqjfc\" (UID: \"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.703708 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-trusted-ca-bundle\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.703761 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/46ccafd1-b791-4e38-8d42-13aebe2909b2-available-featuregates\") pod \"openshift-config-operator-7777fb866f-lhsx5\" (UID: \"46ccafd1-b791-4e38-8d42-13aebe2909b2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.704059 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6c3ed6b4-3ffd-462d-b465-f509ef85858b-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-zrx8s\" (UID: \"6c3ed6b4-3ffd-462d-b465-f509ef85858b\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.704505 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-dxlvp"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.704536 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/29c4d9ea-078d-4ed3-a56a-c0a29887b6a1-images\") pod \"machine-api-operator-5694c8668f-wqjfc\" (UID: \"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.705060 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-service-ca\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.705691 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.705809 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ae1d839-823d-4694-9159-b32ea65ca9d5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-ndqkx\" (UID: \"4ae1d839-823d-4694-9159-b32ea65ca9d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.705949 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxxvx\" (UID: \"11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.706181 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bf88f9b3-812c-45d1-b8a1-58abe81c9d51-auth-proxy-config\") pod \"machine-approver-56656f9798-vb9ln\" (UID: \"bf88f9b3-812c-45d1-b8a1-58abe81c9d51\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.706528 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/29c4d9ea-078d-4ed3-a56a-c0a29887b6a1-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wqjfc\" (UID: \"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.706795 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f196154c-0442-4600-967f-74eb36876b52-serving-cert\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " 
pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.706805 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.706967 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-48l5m"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.707082 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc"] Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.707235 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-oauth-serving-cert\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.707332 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/f196154c-0442-4600-967f-74eb36876b52-node-pullsecrets\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.707831 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.708055 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-oauth-config\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.708247 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.708738 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/467fda03-7ce6-41c9-967c-6e782bd68f90-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.708845 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-etcd-serving-ca\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.709471 4798 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.709643 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9398ffbc-b6a8-4294-83d3-1c80dd716113-config\") pod \"console-operator-58897d9998-k8p2d\" (UID: \"9398ffbc-b6a8-4294-83d3-1c80dd716113\") " pod="openshift-console-operator/console-operator-58897d9998-k8p2d"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.709764 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.710023 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-serving-cert\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.710191 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f196154c-0442-4600-967f-74eb36876b52-audit-dir\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.711075 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-config\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.711170 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-config\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.711271 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.711339 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z"]
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.712265 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9398ffbc-b6a8-4294-83d3-1c80dd716113-serving-cert\") pod \"console-operator-58897d9998-k8p2d\" (UID: \"9398ffbc-b6a8-4294-83d3-1c80dd716113\") " pod="openshift-console-operator/console-operator-58897d9998-k8p2d"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.712586 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-serving-cert\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.712749 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9398ffbc-b6a8-4294-83d3-1c80dd716113-trusted-ca\") pod \"console-operator-58897d9998-k8p2d\" (UID: \"9398ffbc-b6a8-4294-83d3-1c80dd716113\") " pod="openshift-console-operator/console-operator-58897d9998-k8p2d"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.712920 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.713376 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/467fda03-7ce6-41c9-967c-6e782bd68f90-audit-policies\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.713592 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-serving-cert\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.713723 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f196154c-0442-4600-967f-74eb36876b52-etcd-client\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.714634 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.714651 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.714865 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/467fda03-7ce6-41c9-967c-6e782bd68f90-etcd-client\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.715381 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/bf88f9b3-812c-45d1-b8a1-58abe81c9d51-machine-approver-tls\") pod \"machine-approver-56656f9798-vb9ln\" (UID: \"bf88f9b3-812c-45d1-b8a1-58abe81c9d51\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.716195 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.716371 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh"]
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.716925 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/467fda03-7ce6-41c9-967c-6e782bd68f90-serving-cert\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.717638 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-rdggm"]
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.718005 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2211eec-aa3e-429f-9237-fc13fad065ed-config\") pod \"kube-apiserver-operator-766d6c64bb-mdkrr\" (UID: \"e2211eec-aa3e-429f-9237-fc13fad065ed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.718062 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f196154c-0442-4600-967f-74eb36876b52-config\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.718505 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e2211eec-aa3e-429f-9237-fc13fad065ed-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-mdkrr\" (UID: \"e2211eec-aa3e-429f-9237-fc13fad065ed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.718675 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/467fda03-7ce6-41c9-967c-6e782bd68f90-encryption-config\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.718837 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-z5h6j"]
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.721477 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-z5h6j"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.721878 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-98vrc"]
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.722726 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-98vrc"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.723967 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-z5h6j"]
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.724989 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.744296 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.764427 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.784350 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.798108 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7p2m\" (UniqueName: \"kubernetes.io/projected/9bb478ba-d274-4a8f-a67d-635740a72f34-kube-api-access-t7p2m\") pod \"kube-storage-version-migrator-operator-b67b599dd-wfsmq\" (UID: \"9bb478ba-d274-4a8f-a67d-635740a72f34\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.798154 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b193848f-5963-432c-964b-460721f701c3-metrics-tls\") pod \"dns-operator-744455d44c-qv29k\" (UID: \"b193848f-5963-432c-964b-460721f701c3\") " pod="openshift-dns-operator/dns-operator-744455d44c-qv29k"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.798218 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5-bound-sa-token\") pod \"ingress-operator-5b745b69d9-n9wgt\" (UID: \"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.798258 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ql96p\" (UniqueName: \"kubernetes.io/projected/b193848f-5963-432c-964b-460721f701c3-kube-api-access-ql96p\") pod \"dns-operator-744455d44c-qv29k\" (UID: \"b193848f-5963-432c-964b-460721f701c3\") " pod="openshift-dns-operator/dns-operator-744455d44c-qv29k"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.798277 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghqn2\" (UniqueName: \"kubernetes.io/projected/687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5-kube-api-access-ghqn2\") pod \"ingress-operator-5b745b69d9-n9wgt\" (UID: \"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.798312 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5-metrics-tls\") pod \"ingress-operator-5b745b69d9-n9wgt\" (UID: \"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.798352 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bb478ba-d274-4a8f-a67d-635740a72f34-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-wfsmq\" (UID: \"9bb478ba-d274-4a8f-a67d-635740a72f34\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.798412 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5-trusted-ca\") pod \"ingress-operator-5b745b69d9-n9wgt\" (UID: \"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.798440 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bb478ba-d274-4a8f-a67d-635740a72f34-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-wfsmq\" (UID: \"9bb478ba-d274-4a8f-a67d-635740a72f34\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.801097 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b193848f-5963-432c-964b-460721f701c3-metrics-tls\") pod \"dns-operator-744455d44c-qv29k\" (UID: \"b193848f-5963-432c-964b-460721f701c3\") " pod="openshift-dns-operator/dns-operator-744455d44c-qv29k"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.803968 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.824181 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.864763 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.884656 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.905080 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.924480 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.944450 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.964897 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Oct 11 03:57:24 crc kubenswrapper[4798]: I1011 03:57:24.983966 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.004160 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.024636 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.045649 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.065782 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.084332 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.105023 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.125111 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.145971 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.165478 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.184591 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.206320 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.225241 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.233071 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bb478ba-d274-4a8f-a67d-635740a72f34-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-wfsmq\" (UID: \"9bb478ba-d274-4a8f-a67d-635740a72f34\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.244234 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.264744 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.269429 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bb478ba-d274-4a8f-a67d-635740a72f34-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-wfsmq\" (UID: \"9bb478ba-d274-4a8f-a67d-635740a72f34\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.284854 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.292825 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5-metrics-tls\") pod \"ingress-operator-5b745b69d9-n9wgt\" (UID: \"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.304923 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.325793 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.350665 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.360611 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5-trusted-ca\") pod \"ingress-operator-5b745b69d9-n9wgt\" (UID: \"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.364132 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.386641 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.406571 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.425553 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.445965 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.465108 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.485339 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.504806 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.525900 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.545266 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.564618 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.593420 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.605711 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.623228 4798 request.go:700] Waited for 1.011035232s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/secrets?fieldSelector=metadata.name%3Dmarketplace-operator-dockercfg-5nsgg&limit=500&resourceVersion=0
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.625466 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.646033 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.684715 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.704624 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.725145 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.745079 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.765609 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.785342 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.804562 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.825243 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.845380 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.864027 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.884939 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.905510 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.924020 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.944425 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.964474 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Oct 11 03:57:25 crc kubenswrapper[4798]: I1011 03:57:25.983846 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.004915 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.024171 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.045208 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.064646 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.085045 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.105736 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.124695 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.144793 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.165078 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.184209 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.205374 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.226360 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.244558 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.265626 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.284929 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.305061 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.344073 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5tj6\" (UniqueName: \"kubernetes.io/projected/29c4d9ea-078d-4ed3-a56a-c0a29887b6a1-kube-api-access-p5tj6\") pod \"machine-api-operator-5694c8668f-wqjfc\" (UID: \"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.364754 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wqdz\" (UniqueName: \"kubernetes.io/projected/467fda03-7ce6-41c9-967c-6e782bd68f90-kube-api-access-9wqdz\") pod \"apiserver-7bbb656c7d-tv8br\" (UID: \"467fda03-7ce6-41c9-967c-6e782bd68f90\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.384975 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49z9v\" (UniqueName: \"kubernetes.io/projected/806f3022-04e6-4246-a1e3-e9f72066aed3-kube-api-access-49z9v\") pod \"cluster-samples-operator-665b6dd947-8jgvs\" (UID: \"806f3022-04e6-4246-a1e3-e9f72066aed3\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.401725 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrfgj\" (UniqueName: \"kubernetes.io/projected/4ae1d839-823d-4694-9159-b32ea65ca9d5-kube-api-access-xrfgj\") pod \"openshift-apiserver-operator-796bbdcf4f-ndqkx\" (UID: \"4ae1d839-823d-4694-9159-b32ea65ca9d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.421675 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkrbf\" (UniqueName: \"kubernetes.io/projected/6c3ed6b4-3ffd-462d-b465-f509ef85858b-kube-api-access-mkrbf\") pod \"cluster-image-registry-operator-dc59b4c8b-zrx8s\" (UID: \"6c3ed6b4-3ffd-462d-b465-f509ef85858b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.441443 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxxvx\" (UID: \"11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.444775 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.464968 4798 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.483837 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.493125 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.525734 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sk8z8\" (UniqueName: \"kubernetes.io/projected/9398ffbc-b6a8-4294-83d3-1c80dd716113-kube-api-access-sk8z8\") pod \"console-operator-58897d9998-k8p2d\" (UID: \"9398ffbc-b6a8-4294-83d3-1c80dd716113\") " pod="openshift-console-operator/console-operator-58897d9998-k8p2d"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.543373 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e2211eec-aa3e-429f-9237-fc13fad065ed-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-mdkrr\" (UID: \"e2211eec-aa3e-429f-9237-fc13fad065ed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.552182 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.573685 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.573906 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvqph\" (UniqueName: \"kubernetes.io/projected/0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0-kube-api-access-tvqph\") pod \"authentication-operator-69f744f599-nptjh\" (UID: \"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.576854 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.580316 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6c3ed6b4-3ffd-462d-b465-f509ef85858b-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-zrx8s\" (UID: \"6c3ed6b4-3ffd-462d-b465-f509ef85858b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.594425 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.603833 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hs4f\" (UniqueName: \"kubernetes.io/projected/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-kube-api-access-6hs4f\") pod \"oauth-openshift-558db77b4-kw978\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " pod="openshift-authentication/oauth-openshift-558db77b4-kw978"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.623005 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frdtx\" (UniqueName: \"kubernetes.io/projected/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-kube-api-access-frdtx\") pod \"controller-manager-879f6c89f-4wcbm\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.642508 4798 request.go:700] Waited for 1.933978731s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-cluster-machine-approver/serviceaccounts/machine-approver-sa/token
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.643838 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.649607 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pjjx\" (UniqueName: \"kubernetes.io/projected/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-kube-api-access-4pjjx\") pod \"console-f9d7485db-5lpj9\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " pod="openshift-console/console-f9d7485db-5lpj9"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.659699 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5lpj9"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.667237 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pcfl\" (UniqueName: \"kubernetes.io/projected/bf88f9b3-812c-45d1-b8a1-58abe81c9d51-kube-api-access-8pcfl\") pod \"machine-approver-56656f9798-vb9ln\" (UID: \"bf88f9b3-812c-45d1-b8a1-58abe81c9d51\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.689986 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.702635 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzfhf\" (UniqueName: \"kubernetes.io/projected/f196154c-0442-4600-967f-74eb36876b52-kube-api-access-kzfhf\") pod \"apiserver-76f77b778f-db49p\" (UID: \"f196154c-0442-4600-967f-74eb36876b52\") " pod="openshift-apiserver/apiserver-76f77b778f-db49p"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.704507 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-kw978"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.705501 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.711923 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-k8p2d"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.716175 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mndjz\" (UniqueName: \"kubernetes.io/projected/46ccafd1-b791-4e38-8d42-13aebe2909b2-kube-api-access-mndjz\") pod \"openshift-config-operator-7777fb866f-lhsx5\" (UID: \"46ccafd1-b791-4e38-8d42-13aebe2909b2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.727232 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.727595 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.745130 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.760647 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.761571 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.765513 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.768624 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.784503 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.805708 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.844463 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-db49p"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.852556 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7p2m\" (UniqueName: \"kubernetes.io/projected/9bb478ba-d274-4a8f-a67d-635740a72f34-kube-api-access-t7p2m\") pod \"kube-storage-version-migrator-operator-b67b599dd-wfsmq\" (UID: \"9bb478ba-d274-4a8f-a67d-635740a72f34\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.862964 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5-bound-sa-token\") pod \"ingress-operator-5b745b69d9-n9wgt\" (UID: \"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.884175 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ql96p\" (UniqueName: \"kubernetes.io/projected/b193848f-5963-432c-964b-460721f701c3-kube-api-access-ql96p\") pod \"dns-operator-744455d44c-qv29k\" (UID: \"b193848f-5963-432c-964b-460721f701c3\") " pod="openshift-dns-operator/dns-operator-744455d44c-qv29k"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.902438 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghqn2\" (UniqueName: \"kubernetes.io/projected/687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5-kube-api-access-ghqn2\") pod \"ingress-operator-5b745b69d9-n9wgt\" (UID: \"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.916771 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br"]
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.917052 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.928505 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt"
Oct 11 03:57:26 crc kubenswrapper[4798]: I1011 03:57:26.970902 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs"]
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.001925 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx"]
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031202 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8n5k\" (UniqueName: \"kubernetes.io/projected/550d113c-cbae-420d-a970-3a266a94134c-kube-api-access-f8n5k\") pod \"machine-config-controller-84d6567774-lplcw\" (UID: \"550d113c-cbae-420d-a970-3a266a94134c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031261 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qncpc\" (UniqueName: \"kubernetes.io/projected/7965ee7e-8332-46c1-aff0-c2d96ea976dc-kube-api-access-qncpc\") pod \"migrator-59844c95c7-g52t6\" (UID: \"7965ee7e-8332-46c1-aff0-c2d96ea976dc\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-g52t6"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031340 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgcgj\" (UniqueName: \"kubernetes.io/projected/55408d92-301d-4b54-9cb1-09dc11423c33-kube-api-access-tgcgj\") pod \"downloads-7954f5f757-g2k9v\" (UID: \"55408d92-301d-4b54-9cb1-09dc11423c33\") " pod="openshift-console/downloads-7954f5f757-g2k9v"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031364 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-bound-sa-token\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031438 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76d9d582-6e5f-4146-859a-5474e90dbe64-config\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031476 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/78405f10-b1f1-4401-8a3e-579ff5a739e1-metrics-certs\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031528 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/550d113c-cbae-420d-a970-3a266a94134c-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-lplcw\" (UID: \"550d113c-cbae-420d-a970-3a266a94134c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031597 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3de03a8d-34d7-416d-8f1b-330ad429a3b1-ca-trust-extracted\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031648 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-registry-tls\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031702 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfnw9\" (UniqueName: \"kubernetes.io/projected/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-kube-api-access-xfnw9\") pod \"marketplace-operator-79b997595-dp4kr\" (UID: \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\") " pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031725 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpbbg\" (UniqueName: \"kubernetes.io/projected/78405f10-b1f1-4401-8a3e-579ff5a739e1-kube-api-access-zpbbg\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031774 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3de03a8d-34d7-416d-8f1b-330ad429a3b1-registry-certificates\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031796 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3de03a8d-34d7-416d-8f1b-330ad429a3b1-installation-pull-secrets\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031814 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/78405f10-b1f1-4401-8a3e-579ff5a739e1-service-ca-bundle\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031833 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-dp4kr\" (UID: \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\") " pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.031868 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76d9d582-6e5f-4146-859a-5474e90dbe64-serving-cert\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.032786 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkpg9\" (UniqueName: \"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-kube-api-access-qkpg9\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.032835 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-jspml\" (UID: \"f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.032883 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rgsm\" (UniqueName: \"kubernetes.io/projected/f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88-kube-api-access-6rgsm\") pod \"control-plane-machine-set-operator-78cbb6b69f-jspml\" (UID: \"f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.033285 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/76d9d582-6e5f-4146-859a-5474e90dbe64-etcd-client\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.033305 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/78405f10-b1f1-4401-8a3e-579ff5a739e1-stats-auth\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.033812 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3de03a8d-34d7-416d-8f1b-330ad429a3b1-trusted-ca\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.033912 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmxss\" (UniqueName: \"kubernetes.io/projected/76d9d582-6e5f-4146-859a-5474e90dbe64-kube-api-access-xmxss\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.033930 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/76d9d582-6e5f-4146-859a-5474e90dbe64-etcd-ca\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.033988 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/03cf12f7-f45c-46d4-a089-97618c3865b2-profile-collector-cert\") pod \"olm-operator-6b444d44fb-rxtt6\" (UID: \"03cf12f7-f45c-46d4-a089-97618c3865b2\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.034051 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77b085fa-7b0d-45e0-9dec-5adc0eeecb61-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-5bh8x\" (UID: \"77b085fa-7b0d-45e0-9dec-5adc0eeecb61\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.034085 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/550d113c-cbae-420d-a970-3a266a94134c-proxy-tls\") pod \"machine-config-controller-84d6567774-lplcw\" (UID: \"550d113c-cbae-420d-a970-3a266a94134c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.034310 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9qqr\" (UniqueName: \"kubernetes.io/projected/056140e0-6e96-41d3-8d80-bd9e33e56d8b-kube-api-access-x9qqr\") pod \"multus-admission-controller-857f4d67dd-jsvs2\" (UID: \"056140e0-6e96-41d3-8d80-bd9e33e56d8b\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-jsvs2"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.034340 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-dp4kr\" (UID: \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\") " pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.034619 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/056140e0-6e96-41d3-8d80-bd9e33e56d8b-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-jsvs2\" (UID: \"056140e0-6e96-41d3-8d80-bd9e33e56d8b\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-jsvs2"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.034668 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvfc4\" (UniqueName: \"kubernetes.io/projected/77b085fa-7b0d-45e0-9dec-5adc0eeecb61-kube-api-access-zvfc4\") pod \"openshift-controller-manager-operator-756b6f6bc6-5bh8x\" (UID: \"77b085fa-7b0d-45e0-9dec-5adc0eeecb61\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.034723 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.034751 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdkp5\" (UniqueName: \"kubernetes.io/projected/03cf12f7-f45c-46d4-a089-97618c3865b2-kube-api-access-rdkp5\") pod \"olm-operator-6b444d44fb-rxtt6\" (UID: \"03cf12f7-f45c-46d4-a089-97618c3865b2\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.034776 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/78405f10-b1f1-4401-8a3e-579ff5a739e1-default-certificate\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.034791 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/03cf12f7-f45c-46d4-a089-97618c3865b2-srv-cert\") pod \"olm-operator-6b444d44fb-rxtt6\" (UID: \"03cf12f7-f45c-46d4-a089-97618c3865b2\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.034840 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/76d9d582-6e5f-4146-859a-5474e90dbe64-etcd-service-ca\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.034858 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/77b085fa-7b0d-45e0-9dec-5adc0eeecb61-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-5bh8x\" (UID: \"77b085fa-7b0d-45e0-9dec-5adc0eeecb61\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x"
Oct 11 03:57:27 crc kubenswrapper[4798]: E1011 03:57:27.039411 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:27.539370233 +0000 UTC m=+142.875659919 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.076944 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-qv29k"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.135927 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136221 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cf57b149-4889-439d-aaca-0ea06bbc1a45-auth-proxy-config\") pod \"machine-config-operator-74547568cd-68b7z\" (UID: \"cf57b149-4889-439d-aaca-0ea06bbc1a45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136269 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/550d113c-cbae-420d-a970-3a266a94134c-proxy-tls\") pod \"machine-config-controller-84d6567774-lplcw\" (UID: \"550d113c-cbae-420d-a970-3a266a94134c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136298 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9qqr\" (UniqueName: \"kubernetes.io/projected/056140e0-6e96-41d3-8d80-bd9e33e56d8b-kube-api-access-x9qqr\") pod \"multus-admission-controller-857f4d67dd-jsvs2\" (UID: \"056140e0-6e96-41d3-8d80-bd9e33e56d8b\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-jsvs2"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136322 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cf57b149-4889-439d-aaca-0ea06bbc1a45-proxy-tls\") pod \"machine-config-operator-74547568cd-68b7z\" (UID: \"cf57b149-4889-439d-aaca-0ea06bbc1a45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136347 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-dp4kr\" (UID: \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\") " pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136375 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c3e154e-469a-47c1-a71a-b09597e9d303-config\") pod \"route-controller-manager-6576b87f9c-9c8v5\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136415 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/056140e0-6e96-41d3-8d80-bd9e33e56d8b-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-jsvs2\" (UID: \"056140e0-6e96-41d3-8d80-bd9e33e56d8b\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-jsvs2"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136439 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/39e1644a-d213-4689-b241-667a5c3e0c21-signing-cabundle\") pod \"service-ca-9c57cc56f-48l5m\" (UID: \"39e1644a-d213-4689-b241-667a5c3e0c21\") " pod="openshift-service-ca/service-ca-9c57cc56f-48l5m"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136460 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-secret-volume\") pod \"collect-profiles-29335905-vgmmx\" (UID: \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136486 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvfc4\" (UniqueName: \"kubernetes.io/projected/77b085fa-7b0d-45e0-9dec-5adc0eeecb61-kube-api-access-zvfc4\") pod \"openshift-controller-manager-operator-756b6f6bc6-5bh8x\" (UID: \"77b085fa-7b0d-45e0-9dec-5adc0eeecb61\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136509 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-config-volume\") pod \"collect-profiles-29335905-vgmmx\" (UID: \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136536 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bdd198b5-674e-4bf8-afb5-15dbd2987ec8-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-cs7dc\" (UID: \"bdd198b5-674e-4bf8-afb5-15dbd2987ec8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136605 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdkp5\" (UniqueName: \"kubernetes.io/projected/03cf12f7-f45c-46d4-a089-97618c3865b2-kube-api-access-rdkp5\") pod \"olm-operator-6b444d44fb-rxtt6\" (UID: \"03cf12f7-f45c-46d4-a089-97618c3865b2\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6"
Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136630 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName:
\"kubernetes.io/secret/325af4a3-57e0-4c68-8c7f-153566048283-apiservice-cert\") pod \"packageserver-d55dfcdfc-g95pt\" (UID: \"325af4a3-57e0-4c68-8c7f-153566048283\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136656 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/78405f10-b1f1-4401-8a3e-579ff5a739e1-default-certificate\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136680 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/03cf12f7-f45c-46d4-a089-97618c3865b2-srv-cert\") pod \"olm-operator-6b444d44fb-rxtt6\" (UID: \"03cf12f7-f45c-46d4-a089-97618c3865b2\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136707 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6c3e154e-469a-47c1-a71a-b09597e9d303-client-ca\") pod \"route-controller-manager-6576b87f9c-9c8v5\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136734 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3a8f8093-7e4c-450f-8eab-72d11ae9c846-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-jtrv7\" (UID: \"3a8f8093-7e4c-450f-8eab-72d11ae9c846\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136779 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29frm\" (UniqueName: \"kubernetes.io/projected/f001bcce-3b27-477b-9ebf-46fdb31d546c-kube-api-access-29frm\") pod \"machine-config-server-98vrc\" (UID: \"f001bcce-3b27-477b-9ebf-46fdb31d546c\") " pod="openshift-machine-config-operator/machine-config-server-98vrc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136818 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9c42\" (UniqueName: \"kubernetes.io/projected/325af4a3-57e0-4c68-8c7f-153566048283-kube-api-access-j9c42\") pod \"packageserver-d55dfcdfc-g95pt\" (UID: \"325af4a3-57e0-4c68-8c7f-153566048283\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136843 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czhff\" (UniqueName: \"kubernetes.io/projected/972678a7-921f-4581-a5d4-8fcc57387afc-kube-api-access-czhff\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136872 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/76d9d582-6e5f-4146-859a-5474e90dbe64-etcd-service-ca\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136900 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/77b085fa-7b0d-45e0-9dec-5adc0eeecb61-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-5bh8x\" (UID: \"77b085fa-7b0d-45e0-9dec-5adc0eeecb61\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136923 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/325af4a3-57e0-4c68-8c7f-153566048283-tmpfs\") pod \"packageserver-d55dfcdfc-g95pt\" (UID: \"325af4a3-57e0-4c68-8c7f-153566048283\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136948 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/88b86aaf-78be-4bbb-ad2f-a404029803b3-metrics-tls\") pod \"dns-default-z5h6j\" (UID: \"88b86aaf-78be-4bbb-ad2f-a404029803b3\") " pod="openshift-dns/dns-default-z5h6j" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.136974 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8n5k\" (UniqueName: \"kubernetes.io/projected/550d113c-cbae-420d-a970-3a266a94134c-kube-api-access-f8n5k\") pod \"machine-config-controller-84d6567774-lplcw\" (UID: \"550d113c-cbae-420d-a970-3a266a94134c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137014 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qncpc\" (UniqueName: \"kubernetes.io/projected/7965ee7e-8332-46c1-aff0-c2d96ea976dc-kube-api-access-qncpc\") pod \"migrator-59844c95c7-g52t6\" (UID: \"7965ee7e-8332-46c1-aff0-c2d96ea976dc\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-g52t6" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137173 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84qkq\" (UniqueName: \"kubernetes.io/projected/88b86aaf-78be-4bbb-ad2f-a404029803b3-kube-api-access-84qkq\") pod \"dns-default-z5h6j\" (UID: \"88b86aaf-78be-4bbb-ad2f-a404029803b3\") " pod="openshift-dns/dns-default-z5h6j" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137220 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgcgj\" (UniqueName: \"kubernetes.io/projected/55408d92-301d-4b54-9cb1-09dc11423c33-kube-api-access-tgcgj\") pod \"downloads-7954f5f757-g2k9v\" (UID: \"55408d92-301d-4b54-9cb1-09dc11423c33\") " pod="openshift-console/downloads-7954f5f757-g2k9v" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137247 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/f001bcce-3b27-477b-9ebf-46fdb31d546c-certs\") pod \"machine-config-server-98vrc\" (UID: \"f001bcce-3b27-477b-9ebf-46fdb31d546c\") " 
pod="openshift-machine-config-operator/machine-config-server-98vrc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137301 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/325af4a3-57e0-4c68-8c7f-153566048283-webhook-cert\") pod \"packageserver-d55dfcdfc-g95pt\" (UID: \"325af4a3-57e0-4c68-8c7f-153566048283\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137376 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-bound-sa-token\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137428 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76d9d582-6e5f-4146-859a-5474e90dbe64-config\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137454 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/78405f10-b1f1-4401-8a3e-579ff5a739e1-metrics-certs\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137485 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4rqk\" (UniqueName: \"kubernetes.io/projected/136055f4-c38a-4cb1-8259-fcf1143c0bcf-kube-api-access-w4rqk\") pod \"service-ca-operator-777779d784-xndjl\" (UID: \"136055f4-c38a-4cb1-8259-fcf1143c0bcf\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137513 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/550d113c-cbae-420d-a970-3a266a94134c-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-lplcw\" (UID: \"550d113c-cbae-420d-a970-3a266a94134c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137542 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/cf57b149-4889-439d-aaca-0ea06bbc1a45-images\") pod \"machine-config-operator-74547568cd-68b7z\" (UID: \"cf57b149-4889-439d-aaca-0ea06bbc1a45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137569 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3de03a8d-34d7-416d-8f1b-330ad429a3b1-ca-trust-extracted\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137599 4798 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9jbz\" (UniqueName: \"kubernetes.io/projected/3a8f8093-7e4c-450f-8eab-72d11ae9c846-kube-api-access-n9jbz\") pod \"package-server-manager-789f6589d5-jtrv7\" (UID: \"3a8f8093-7e4c-450f-8eab-72d11ae9c846\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137652 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/39e1644a-d213-4689-b241-667a5c3e0c21-signing-key\") pod \"service-ca-9c57cc56f-48l5m\" (UID: \"39e1644a-d213-4689-b241-667a5c3e0c21\") " pod="openshift-service-ca/service-ca-9c57cc56f-48l5m" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137678 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52hs6\" (UniqueName: \"kubernetes.io/projected/39e1644a-d213-4689-b241-667a5c3e0c21-kube-api-access-52hs6\") pod \"service-ca-9c57cc56f-48l5m\" (UID: \"39e1644a-d213-4689-b241-667a5c3e0c21\") " pod="openshift-service-ca/service-ca-9c57cc56f-48l5m" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137704 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49pc7\" (UniqueName: \"kubernetes.io/projected/6bf5263f-746e-4224-a63b-9a08d32e9e77-kube-api-access-49pc7\") pod \"ingress-canary-rdggm\" (UID: \"6bf5263f-746e-4224-a63b-9a08d32e9e77\") " pod="openshift-ingress-canary/ingress-canary-rdggm" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137731 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/f001bcce-3b27-477b-9ebf-46fdb31d546c-node-bootstrap-token\") pod \"machine-config-server-98vrc\" (UID: \"f001bcce-3b27-477b-9ebf-46fdb31d546c\") " pod="openshift-machine-config-operator/machine-config-server-98vrc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137757 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-registry-tls\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137783 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfnw9\" (UniqueName: \"kubernetes.io/projected/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-kube-api-access-xfnw9\") pod \"marketplace-operator-79b997595-dp4kr\" (UID: \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\") " pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137808 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpbbg\" (UniqueName: \"kubernetes.io/projected/78405f10-b1f1-4401-8a3e-579ff5a739e1-kube-api-access-zpbbg\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137852 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: 
\"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-socket-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137919 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9d7x\" (UniqueName: \"kubernetes.io/projected/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-kube-api-access-m9d7x\") pod \"collect-profiles-29335905-vgmmx\" (UID: \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137950 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vfz2\" (UniqueName: \"kubernetes.io/projected/6c3e154e-469a-47c1-a71a-b09597e9d303-kube-api-access-8vfz2\") pod \"route-controller-manager-6576b87f9c-9c8v5\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.137975 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-plugins-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138048 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bdd198b5-674e-4bf8-afb5-15dbd2987ec8-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-cs7dc\" (UID: \"bdd198b5-674e-4bf8-afb5-15dbd2987ec8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138080 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3de03a8d-34d7-416d-8f1b-330ad429a3b1-registry-certificates\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138108 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c3e154e-469a-47c1-a71a-b09597e9d303-serving-cert\") pod \"route-controller-manager-6576b87f9c-9c8v5\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138134 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-csi-data-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138162 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/3de03a8d-34d7-416d-8f1b-330ad429a3b1-installation-pull-secrets\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138188 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dz6m8\" (UniqueName: \"kubernetes.io/projected/5626f7eb-4f04-4292-a479-fadf2514fa78-kube-api-access-dz6m8\") pod \"catalog-operator-68c6474976-64mrh\" (UID: \"5626f7eb-4f04-4292-a479-fadf2514fa78\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138215 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-dp4kr\" (UID: \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\") " pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138239 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/78405f10-b1f1-4401-8a3e-579ff5a739e1-service-ca-bundle\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138268 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76d9d582-6e5f-4146-859a-5474e90dbe64-serving-cert\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138307 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkpg9\" (UniqueName: \"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-kube-api-access-qkpg9\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138346 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-jspml\" (UID: \"f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138378 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/136055f4-c38a-4cb1-8259-fcf1143c0bcf-serving-cert\") pod \"service-ca-operator-777779d784-xndjl\" (UID: \"136055f4-c38a-4cb1-8259-fcf1143c0bcf\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138459 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/88b86aaf-78be-4bbb-ad2f-a404029803b3-config-volume\") pod \"dns-default-z5h6j\" (UID: \"88b86aaf-78be-4bbb-ad2f-a404029803b3\") " pod="openshift-dns/dns-default-z5h6j" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138536 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t66m8\" (UniqueName: \"kubernetes.io/projected/cf57b149-4889-439d-aaca-0ea06bbc1a45-kube-api-access-t66m8\") pod \"machine-config-operator-74547568cd-68b7z\" (UID: \"cf57b149-4889-439d-aaca-0ea06bbc1a45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138561 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-registration-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138601 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdd198b5-674e-4bf8-afb5-15dbd2987ec8-config\") pod \"kube-controller-manager-operator-78b949d7b-cs7dc\" (UID: \"bdd198b5-674e-4bf8-afb5-15dbd2987ec8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138632 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rgsm\" (UniqueName: \"kubernetes.io/projected/f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88-kube-api-access-6rgsm\") pod \"control-plane-machine-set-operator-78cbb6b69f-jspml\" (UID: \"f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138659 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/76d9d582-6e5f-4146-859a-5474e90dbe64-etcd-client\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138684 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/78405f10-b1f1-4401-8a3e-579ff5a739e1-stats-auth\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138718 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/136055f4-c38a-4cb1-8259-fcf1143c0bcf-config\") pod \"service-ca-operator-777779d784-xndjl\" (UID: \"136055f4-c38a-4cb1-8259-fcf1143c0bcf\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138757 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3de03a8d-34d7-416d-8f1b-330ad429a3b1-trusted-ca\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: 
\"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138790 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5626f7eb-4f04-4292-a479-fadf2514fa78-profile-collector-cert\") pod \"catalog-operator-68c6474976-64mrh\" (UID: \"5626f7eb-4f04-4292-a479-fadf2514fa78\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138812 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5626f7eb-4f04-4292-a479-fadf2514fa78-srv-cert\") pod \"catalog-operator-68c6474976-64mrh\" (UID: \"5626f7eb-4f04-4292-a479-fadf2514fa78\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138835 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6bf5263f-746e-4224-a63b-9a08d32e9e77-cert\") pod \"ingress-canary-rdggm\" (UID: \"6bf5263f-746e-4224-a63b-9a08d32e9e77\") " pod="openshift-ingress-canary/ingress-canary-rdggm" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138870 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmxss\" (UniqueName: \"kubernetes.io/projected/76d9d582-6e5f-4146-859a-5474e90dbe64-kube-api-access-xmxss\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138895 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/76d9d582-6e5f-4146-859a-5474e90dbe64-etcd-ca\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138921 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/03cf12f7-f45c-46d4-a089-97618c3865b2-profile-collector-cert\") pod \"olm-operator-6b444d44fb-rxtt6\" (UID: \"03cf12f7-f45c-46d4-a089-97618c3865b2\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138959 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77b085fa-7b0d-45e0-9dec-5adc0eeecb61-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-5bh8x\" (UID: \"77b085fa-7b0d-45e0-9dec-5adc0eeecb61\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.138987 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-mountpoint-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: E1011 03:57:27.139152 4798 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:27.639126701 +0000 UTC m=+142.975416387 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.141324 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/76d9d582-6e5f-4146-859a-5474e90dbe64-etcd-service-ca\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.142280 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76d9d582-6e5f-4146-859a-5474e90dbe64-config\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.142752 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-dp4kr\" (UID: \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\") " pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.142986 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/550d113c-cbae-420d-a970-3a266a94134c-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-lplcw\" (UID: \"550d113c-cbae-420d-a970-3a266a94134c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.143782 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3de03a8d-34d7-416d-8f1b-330ad429a3b1-ca-trust-extracted\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.146993 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3de03a8d-34d7-416d-8f1b-330ad429a3b1-registry-certificates\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.147690 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/76d9d582-6e5f-4146-859a-5474e90dbe64-etcd-ca\") pod \"etcd-operator-b45778765-xv9zg\" (UID: 
\"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.147876 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3de03a8d-34d7-416d-8f1b-330ad429a3b1-trusted-ca\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.147975 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/78405f10-b1f1-4401-8a3e-579ff5a739e1-service-ca-bundle\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.148370 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76d9d582-6e5f-4146-859a-5474e90dbe64-serving-cert\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.148726 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77b085fa-7b0d-45e0-9dec-5adc0eeecb61-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-5bh8x\" (UID: \"77b085fa-7b0d-45e0-9dec-5adc0eeecb61\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.148949 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-jspml\" (UID: \"f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.149013 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.149051 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.149652 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/78405f10-b1f1-4401-8a3e-579ff5a739e1-default-certificate\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.152057 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-registry-tls\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.154941 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-dp4kr\" (UID: \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\") " pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.155231 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/78405f10-b1f1-4401-8a3e-579ff5a739e1-stats-auth\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.155266 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/550d113c-cbae-420d-a970-3a266a94134c-proxy-tls\") pod \"machine-config-controller-84d6567774-lplcw\" (UID: \"550d113c-cbae-420d-a970-3a266a94134c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.155615 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/78405f10-b1f1-4401-8a3e-579ff5a739e1-metrics-certs\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.158764 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/056140e0-6e96-41d3-8d80-bd9e33e56d8b-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-jsvs2\" (UID: \"056140e0-6e96-41d3-8d80-bd9e33e56d8b\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-jsvs2" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.159448 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/76d9d582-6e5f-4146-859a-5474e90dbe64-etcd-client\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.162798 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/03cf12f7-f45c-46d4-a089-97618c3865b2-profile-collector-cert\") pod \"olm-operator-6b444d44fb-rxtt6\" (UID: \"03cf12f7-f45c-46d4-a089-97618c3865b2\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.165978 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/03cf12f7-f45c-46d4-a089-97618c3865b2-srv-cert\") pod \"olm-operator-6b444d44fb-rxtt6\" (UID: \"03cf12f7-f45c-46d4-a089-97618c3865b2\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.166385 4798 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3de03a8d-34d7-416d-8f1b-330ad429a3b1-installation-pull-secrets\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.169230 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/77b085fa-7b0d-45e0-9dec-5adc0eeecb61-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-5bh8x\" (UID: \"77b085fa-7b0d-45e0-9dec-5adc0eeecb61\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.191295 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9qqr\" (UniqueName: \"kubernetes.io/projected/056140e0-6e96-41d3-8d80-bd9e33e56d8b-kube-api-access-x9qqr\") pod \"multus-admission-controller-857f4d67dd-jsvs2\" (UID: \"056140e0-6e96-41d3-8d80-bd9e33e56d8b\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-jsvs2" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.215378 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdkp5\" (UniqueName: \"kubernetes.io/projected/03cf12f7-f45c-46d4-a089-97618c3865b2-kube-api-access-rdkp5\") pod \"olm-operator-6b444d44fb-rxtt6\" (UID: \"03cf12f7-f45c-46d4-a089-97618c3865b2\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.217769 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wqjfc"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.224818 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.243855 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvfc4\" (UniqueName: \"kubernetes.io/projected/77b085fa-7b0d-45e0-9dec-5adc0eeecb61-kube-api-access-zvfc4\") pod \"openshift-controller-manager-operator-756b6f6bc6-5bh8x\" (UID: \"77b085fa-7b0d-45e0-9dec-5adc0eeecb61\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245018 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29frm\" (UniqueName: \"kubernetes.io/projected/f001bcce-3b27-477b-9ebf-46fdb31d546c-kube-api-access-29frm\") pod \"machine-config-server-98vrc\" (UID: \"f001bcce-3b27-477b-9ebf-46fdb31d546c\") " pod="openshift-machine-config-operator/machine-config-server-98vrc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245053 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9c42\" (UniqueName: \"kubernetes.io/projected/325af4a3-57e0-4c68-8c7f-153566048283-kube-api-access-j9c42\") pod \"packageserver-d55dfcdfc-g95pt\" (UID: \"325af4a3-57e0-4c68-8c7f-153566048283\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245082 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czhff\" 
(UniqueName: \"kubernetes.io/projected/972678a7-921f-4581-a5d4-8fcc57387afc-kube-api-access-czhff\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245110 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/325af4a3-57e0-4c68-8c7f-153566048283-tmpfs\") pod \"packageserver-d55dfcdfc-g95pt\" (UID: \"325af4a3-57e0-4c68-8c7f-153566048283\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245134 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245146 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/88b86aaf-78be-4bbb-ad2f-a404029803b3-metrics-tls\") pod \"dns-default-z5h6j\" (UID: \"88b86aaf-78be-4bbb-ad2f-a404029803b3\") " pod="openshift-dns/dns-default-z5h6j" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245195 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84qkq\" (UniqueName: \"kubernetes.io/projected/88b86aaf-78be-4bbb-ad2f-a404029803b3-kube-api-access-84qkq\") pod \"dns-default-z5h6j\" (UID: \"88b86aaf-78be-4bbb-ad2f-a404029803b3\") " pod="openshift-dns/dns-default-z5h6j" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245229 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/f001bcce-3b27-477b-9ebf-46fdb31d546c-certs\") pod \"machine-config-server-98vrc\" (UID: \"f001bcce-3b27-477b-9ebf-46fdb31d546c\") " pod="openshift-machine-config-operator/machine-config-server-98vrc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245254 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/325af4a3-57e0-4c68-8c7f-153566048283-webhook-cert\") pod \"packageserver-d55dfcdfc-g95pt\" (UID: \"325af4a3-57e0-4c68-8c7f-153566048283\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245292 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4rqk\" (UniqueName: \"kubernetes.io/projected/136055f4-c38a-4cb1-8259-fcf1143c0bcf-kube-api-access-w4rqk\") pod \"service-ca-operator-777779d784-xndjl\" (UID: \"136055f4-c38a-4cb1-8259-fcf1143c0bcf\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245318 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/cf57b149-4889-439d-aaca-0ea06bbc1a45-images\") pod \"machine-config-operator-74547568cd-68b7z\" (UID: \"cf57b149-4889-439d-aaca-0ea06bbc1a45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245341 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9jbz\" (UniqueName: \"kubernetes.io/projected/3a8f8093-7e4c-450f-8eab-72d11ae9c846-kube-api-access-n9jbz\") pod 
\"package-server-manager-789f6589d5-jtrv7\" (UID: \"3a8f8093-7e4c-450f-8eab-72d11ae9c846\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245364 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/39e1644a-d213-4689-b241-667a5c3e0c21-signing-key\") pod \"service-ca-9c57cc56f-48l5m\" (UID: \"39e1644a-d213-4689-b241-667a5c3e0c21\") " pod="openshift-service-ca/service-ca-9c57cc56f-48l5m" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245385 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52hs6\" (UniqueName: \"kubernetes.io/projected/39e1644a-d213-4689-b241-667a5c3e0c21-kube-api-access-52hs6\") pod \"service-ca-9c57cc56f-48l5m\" (UID: \"39e1644a-d213-4689-b241-667a5c3e0c21\") " pod="openshift-service-ca/service-ca-9c57cc56f-48l5m" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245429 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49pc7\" (UniqueName: \"kubernetes.io/projected/6bf5263f-746e-4224-a63b-9a08d32e9e77-kube-api-access-49pc7\") pod \"ingress-canary-rdggm\" (UID: \"6bf5263f-746e-4224-a63b-9a08d32e9e77\") " pod="openshift-ingress-canary/ingress-canary-rdggm" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245644 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/f001bcce-3b27-477b-9ebf-46fdb31d546c-node-bootstrap-token\") pod \"machine-config-server-98vrc\" (UID: \"f001bcce-3b27-477b-9ebf-46fdb31d546c\") " pod="openshift-machine-config-operator/machine-config-server-98vrc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245687 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-socket-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245710 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9d7x\" (UniqueName: \"kubernetes.io/projected/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-kube-api-access-m9d7x\") pod \"collect-profiles-29335905-vgmmx\" (UID: \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245738 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vfz2\" (UniqueName: \"kubernetes.io/projected/6c3e154e-469a-47c1-a71a-b09597e9d303-kube-api-access-8vfz2\") pod \"route-controller-manager-6576b87f9c-9c8v5\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245762 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-plugins-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245796 4798 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bdd198b5-674e-4bf8-afb5-15dbd2987ec8-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-cs7dc\" (UID: \"bdd198b5-674e-4bf8-afb5-15dbd2987ec8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245803 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/325af4a3-57e0-4c68-8c7f-153566048283-tmpfs\") pod \"packageserver-d55dfcdfc-g95pt\" (UID: \"325af4a3-57e0-4c68-8c7f-153566048283\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245821 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c3e154e-469a-47c1-a71a-b09597e9d303-serving-cert\") pod \"route-controller-manager-6576b87f9c-9c8v5\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245845 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-csi-data-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245875 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dz6m8\" (UniqueName: \"kubernetes.io/projected/5626f7eb-4f04-4292-a479-fadf2514fa78-kube-api-access-dz6m8\") pod \"catalog-operator-68c6474976-64mrh\" (UID: \"5626f7eb-4f04-4292-a479-fadf2514fa78\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245920 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/136055f4-c38a-4cb1-8259-fcf1143c0bcf-serving-cert\") pod \"service-ca-operator-777779d784-xndjl\" (UID: \"136055f4-c38a-4cb1-8259-fcf1143c0bcf\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245963 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/88b86aaf-78be-4bbb-ad2f-a404029803b3-config-volume\") pod \"dns-default-z5h6j\" (UID: \"88b86aaf-78be-4bbb-ad2f-a404029803b3\") " pod="openshift-dns/dns-default-z5h6j" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.245988 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t66m8\" (UniqueName: \"kubernetes.io/projected/cf57b149-4889-439d-aaca-0ea06bbc1a45-kube-api-access-t66m8\") pod \"machine-config-operator-74547568cd-68b7z\" (UID: \"cf57b149-4889-439d-aaca-0ea06bbc1a45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.246012 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-registration-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: 
\"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.246039 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdd198b5-674e-4bf8-afb5-15dbd2987ec8-config\") pod \"kube-controller-manager-operator-78b949d7b-cs7dc\" (UID: \"bdd198b5-674e-4bf8-afb5-15dbd2987ec8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.246071 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/136055f4-c38a-4cb1-8259-fcf1143c0bcf-config\") pod \"service-ca-operator-777779d784-xndjl\" (UID: \"136055f4-c38a-4cb1-8259-fcf1143c0bcf\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.246125 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5626f7eb-4f04-4292-a479-fadf2514fa78-profile-collector-cert\") pod \"catalog-operator-68c6474976-64mrh\" (UID: \"5626f7eb-4f04-4292-a479-fadf2514fa78\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.246146 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5626f7eb-4f04-4292-a479-fadf2514fa78-srv-cert\") pod \"catalog-operator-68c6474976-64mrh\" (UID: \"5626f7eb-4f04-4292-a479-fadf2514fa78\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.246168 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6bf5263f-746e-4224-a63b-9a08d32e9e77-cert\") pod \"ingress-canary-rdggm\" (UID: \"6bf5263f-746e-4224-a63b-9a08d32e9e77\") " pod="openshift-ingress-canary/ingress-canary-rdggm" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.246222 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-mountpoint-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.246246 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cf57b149-4889-439d-aaca-0ea06bbc1a45-auth-proxy-config\") pod \"machine-config-operator-74547568cd-68b7z\" (UID: \"cf57b149-4889-439d-aaca-0ea06bbc1a45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.246275 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cf57b149-4889-439d-aaca-0ea06bbc1a45-proxy-tls\") pod \"machine-config-operator-74547568cd-68b7z\" (UID: \"cf57b149-4889-439d-aaca-0ea06bbc1a45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.246300 4798 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c3e154e-469a-47c1-a71a-b09597e9d303-config\") pod \"route-controller-manager-6576b87f9c-9c8v5\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.246326 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/39e1644a-d213-4689-b241-667a5c3e0c21-signing-cabundle\") pod \"service-ca-9c57cc56f-48l5m\" (UID: \"39e1644a-d213-4689-b241-667a5c3e0c21\") " pod="openshift-service-ca/service-ca-9c57cc56f-48l5m" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.246348 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-secret-volume\") pod \"collect-profiles-29335905-vgmmx\" (UID: \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.247729 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-config-volume\") pod \"collect-profiles-29335905-vgmmx\" (UID: \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.247772 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bdd198b5-674e-4bf8-afb5-15dbd2987ec8-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-cs7dc\" (UID: \"bdd198b5-674e-4bf8-afb5-15dbd2987ec8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.247816 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.247884 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/325af4a3-57e0-4c68-8c7f-153566048283-apiservice-cert\") pod \"packageserver-d55dfcdfc-g95pt\" (UID: \"325af4a3-57e0-4c68-8c7f-153566048283\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.247913 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6c3e154e-469a-47c1-a71a-b09597e9d303-client-ca\") pod \"route-controller-manager-6576b87f9c-9c8v5\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.247938 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/3a8f8093-7e4c-450f-8eab-72d11ae9c846-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-jtrv7\" (UID: \"3a8f8093-7e4c-450f-8eab-72d11ae9c846\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.248574 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdd198b5-674e-4bf8-afb5-15dbd2987ec8-config\") pod \"kube-controller-manager-operator-78b949d7b-cs7dc\" (UID: \"bdd198b5-674e-4bf8-afb5-15dbd2987ec8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.249309 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/cf57b149-4889-439d-aaca-0ea06bbc1a45-images\") pod \"machine-config-operator-74547568cd-68b7z\" (UID: \"cf57b149-4889-439d-aaca-0ea06bbc1a45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.249588 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-csi-data-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.249831 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx" event={"ID":"11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee","Type":"ContainerStarted","Data":"04560c092f90580faf9cd61d39f7f14b8b0e600dbb0aa8851e2f5d2679b745b1"} Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.260630 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-jsvs2" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.263498 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/88b86aaf-78be-4bbb-ad2f-a404029803b3-config-volume\") pod \"dns-default-z5h6j\" (UID: \"88b86aaf-78be-4bbb-ad2f-a404029803b3\") " pod="openshift-dns/dns-default-z5h6j" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.264370 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-socket-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.264729 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-plugins-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.265174 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-bound-sa-token\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.265347 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/136055f4-c38a-4cb1-8259-fcf1143c0bcf-config\") pod \"service-ca-operator-777779d784-xndjl\" (UID: \"136055f4-c38a-4cb1-8259-fcf1143c0bcf\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.265519 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c3e154e-469a-47c1-a71a-b09597e9d303-config\") pod \"route-controller-manager-6576b87f9c-9c8v5\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.265889 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-mountpoint-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.266463 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/39e1644a-d213-4689-b241-667a5c3e0c21-signing-cabundle\") pod \"service-ca-9c57cc56f-48l5m\" (UID: \"39e1644a-d213-4689-b241-667a5c3e0c21\") " pod="openshift-service-ca/service-ca-9c57cc56f-48l5m" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.267684 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cf57b149-4889-439d-aaca-0ea06bbc1a45-auth-proxy-config\") pod \"machine-config-operator-74547568cd-68b7z\" (UID: \"cf57b149-4889-439d-aaca-0ea06bbc1a45\") 
" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.267711 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/972678a7-921f-4581-a5d4-8fcc57387afc-registration-dir\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.268574 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/88b86aaf-78be-4bbb-ad2f-a404029803b3-metrics-tls\") pod \"dns-default-z5h6j\" (UID: \"88b86aaf-78be-4bbb-ad2f-a404029803b3\") " pod="openshift-dns/dns-default-z5h6j" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.269106 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bdd198b5-674e-4bf8-afb5-15dbd2987ec8-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-cs7dc\" (UID: \"bdd198b5-674e-4bf8-afb5-15dbd2987ec8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.269868 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c3e154e-469a-47c1-a71a-b09597e9d303-serving-cert\") pod \"route-controller-manager-6576b87f9c-9c8v5\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:27 crc kubenswrapper[4798]: E1011 03:57:27.272733 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:27.772704847 +0000 UTC m=+143.108994533 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.272726 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/39e1644a-d213-4689-b241-667a5c3e0c21-signing-key\") pod \"service-ca-9c57cc56f-48l5m\" (UID: \"39e1644a-d213-4689-b241-667a5c3e0c21\") " pod="openshift-service-ca/service-ca-9c57cc56f-48l5m" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.274670 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6c3e154e-469a-47c1-a71a-b09597e9d303-client-ca\") pod \"route-controller-manager-6576b87f9c-9c8v5\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.275167 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qncpc\" (UniqueName: \"kubernetes.io/projected/7965ee7e-8332-46c1-aff0-c2d96ea976dc-kube-api-access-qncpc\") pod \"migrator-59844c95c7-g52t6\" (UID: \"7965ee7e-8332-46c1-aff0-c2d96ea976dc\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-g52t6" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.282318 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/325af4a3-57e0-4c68-8c7f-153566048283-apiservice-cert\") pod \"packageserver-d55dfcdfc-g95pt\" (UID: \"325af4a3-57e0-4c68-8c7f-153566048283\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.285582 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" event={"ID":"467fda03-7ce6-41c9-967c-6e782bd68f90","Type":"ContainerStarted","Data":"1d0975deb120a0f2806b8274af1e0b8f2a5c8145bfbb0ac55e7dafab561645ab"} Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.287546 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6bf5263f-746e-4224-a63b-9a08d32e9e77-cert\") pod \"ingress-canary-rdggm\" (UID: \"6bf5263f-746e-4224-a63b-9a08d32e9e77\") " pod="openshift-ingress-canary/ingress-canary-rdggm" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.287921 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/136055f4-c38a-4cb1-8259-fcf1143c0bcf-serving-cert\") pod \"service-ca-operator-777779d784-xndjl\" (UID: \"136055f4-c38a-4cb1-8259-fcf1143c0bcf\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.294065 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5626f7eb-4f04-4292-a479-fadf2514fa78-profile-collector-cert\") pod \"catalog-operator-68c6474976-64mrh\" (UID: \"5626f7eb-4f04-4292-a479-fadf2514fa78\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.297731 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" event={"ID":"bf88f9b3-812c-45d1-b8a1-58abe81c9d51","Type":"ContainerStarted","Data":"afdf486e41f2f0f9a1a07c2e594c61a138641248c7e223009b19a2d09aa7714e"} Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.297797 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" event={"ID":"bf88f9b3-812c-45d1-b8a1-58abe81c9d51","Type":"ContainerStarted","Data":"4ad73b378121db11248d0a3dda9ca4fda2748a4b402d377fb94be6a37043a227"} Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.297748 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/f001bcce-3b27-477b-9ebf-46fdb31d546c-node-bootstrap-token\") pod \"machine-config-server-98vrc\" (UID: \"f001bcce-3b27-477b-9ebf-46fdb31d546c\") " pod="openshift-machine-config-operator/machine-config-server-98vrc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.298276 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8n5k\" (UniqueName: \"kubernetes.io/projected/550d113c-cbae-420d-a970-3a266a94134c-kube-api-access-f8n5k\") pod \"machine-config-controller-84d6567774-lplcw\" (UID: \"550d113c-cbae-420d-a970-3a266a94134c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.300680 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/325af4a3-57e0-4c68-8c7f-153566048283-webhook-cert\") pod \"packageserver-d55dfcdfc-g95pt\" (UID: \"325af4a3-57e0-4c68-8c7f-153566048283\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.301256 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5626f7eb-4f04-4292-a479-fadf2514fa78-srv-cert\") pod \"catalog-operator-68c6474976-64mrh\" (UID: \"5626f7eb-4f04-4292-a479-fadf2514fa78\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.301571 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-config-volume\") pod \"collect-profiles-29335905-vgmmx\" (UID: \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.302376 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cf57b149-4889-439d-aaca-0ea06bbc1a45-proxy-tls\") pod \"machine-config-operator-74547568cd-68b7z\" (UID: \"cf57b149-4889-439d-aaca-0ea06bbc1a45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.302789 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3a8f8093-7e4c-450f-8eab-72d11ae9c846-package-server-manager-serving-cert\") pod 
\"package-server-manager-789f6589d5-jtrv7\" (UID: \"3a8f8093-7e4c-450f-8eab-72d11ae9c846\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.303482 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-secret-volume\") pod \"collect-profiles-29335905-vgmmx\" (UID: \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.309057 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/f001bcce-3b27-477b-9ebf-46fdb31d546c-certs\") pod \"machine-config-server-98vrc\" (UID: \"f001bcce-3b27-477b-9ebf-46fdb31d546c\") " pod="openshift-machine-config-operator/machine-config-server-98vrc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.309654 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgcgj\" (UniqueName: \"kubernetes.io/projected/55408d92-301d-4b54-9cb1-09dc11423c33-kube-api-access-tgcgj\") pod \"downloads-7954f5f757-g2k9v\" (UID: \"55408d92-301d-4b54-9cb1-09dc11423c33\") " pod="openshift-console/downloads-7954f5f757-g2k9v" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.324136 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkpg9\" (UniqueName: \"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-kube-api-access-qkpg9\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.343022 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpbbg\" (UniqueName: \"kubernetes.io/projected/78405f10-b1f1-4401-8a3e-579ff5a739e1-kube-api-access-zpbbg\") pod \"router-default-5444994796-kgrgq\" (UID: \"78405f10-b1f1-4401-8a3e-579ff5a739e1\") " pod="openshift-ingress/router-default-5444994796-kgrgq" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.349310 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:27 crc kubenswrapper[4798]: E1011 03:57:27.349901 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:27.849884263 +0000 UTC m=+143.186173939 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.373023 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.377575 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfnw9\" (UniqueName: \"kubernetes.io/projected/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-kube-api-access-xfnw9\") pod \"marketplace-operator-79b997595-dp4kr\" (UID: \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\") " pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.395593 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-kw978"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.396701 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rgsm\" (UniqueName: \"kubernetes.io/projected/f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88-kube-api-access-6rgsm\") pod \"control-plane-machine-set-operator-78cbb6b69f-jspml\" (UID: \"f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.407875 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.409117 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmxss\" (UniqueName: \"kubernetes.io/projected/76d9d582-6e5f-4146-859a-5474e90dbe64-kube-api-access-xmxss\") pod \"etcd-operator-b45778765-xv9zg\" (UID: \"76d9d582-6e5f-4146-859a-5474e90dbe64\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.441845 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-k8p2d"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.442233 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4wcbm"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.451188 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: E1011 03:57:27.451597 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:27.951581158 +0000 UTC m=+143.287870844 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.453985 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-kgrgq" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.463685 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.464197 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czhff\" (UniqueName: \"kubernetes.io/projected/972678a7-921f-4581-a5d4-8fcc57387afc-kube-api-access-czhff\") pod \"csi-hostpathplugin-dxlvp\" (UID: \"972678a7-921f-4581-a5d4-8fcc57387afc\") " pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.466233 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29frm\" (UniqueName: \"kubernetes.io/projected/f001bcce-3b27-477b-9ebf-46fdb31d546c-kube-api-access-29frm\") pod \"machine-config-server-98vrc\" (UID: \"f001bcce-3b27-477b-9ebf-46fdb31d546c\") " pod="openshift-machine-config-operator/machine-config-server-98vrc" Oct 11 03:57:27 crc kubenswrapper[4798]: W1011 03:57:27.474120 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9398ffbc_b6a8_4294_83d3_1c80dd716113.slice/crio-d08b6cc7295e31f7da63863747077a807e6220fe26d43e2febe59ef405d01d30 WatchSource:0}: Error finding container d08b6cc7295e31f7da63863747077a807e6220fe26d43e2febe59ef405d01d30: Status 404 returned error can't find the container with id d08b6cc7295e31f7da63863747077a807e6220fe26d43e2febe59ef405d01d30 Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.484747 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52hs6\" (UniqueName: \"kubernetes.io/projected/39e1644a-d213-4689-b241-667a5c3e0c21-kube-api-access-52hs6\") pod \"service-ca-9c57cc56f-48l5m\" (UID: \"39e1644a-d213-4689-b241-667a5c3e0c21\") " pod="openshift-service-ca/service-ca-9c57cc56f-48l5m" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.485116 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-g2k9v" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.492158 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-g52t6" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.498384 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" Oct 11 03:57:27 crc kubenswrapper[4798]: W1011 03:57:27.498638 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd9eedcac_7bb5_4ddf_a856_387e2fb253b1.slice/crio-a0fedc651405a69ca5e1f6bca83f6541ae0c440d0bf5bf742615294613b606c5 WatchSource:0}: Error finding container a0fedc651405a69ca5e1f6bca83f6541ae0c440d0bf5bf742615294613b606c5: Status 404 returned error can't find the container with id a0fedc651405a69ca5e1f6bca83f6541ae0c440d0bf5bf742615294613b606c5 Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.507221 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4rqk\" (UniqueName: \"kubernetes.io/projected/136055f4-c38a-4cb1-8259-fcf1143c0bcf-kube-api-access-w4rqk\") pod \"service-ca-operator-777779d784-xndjl\" (UID: \"136055f4-c38a-4cb1-8259-fcf1143c0bcf\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.530448 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.531865 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.542700 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9c42\" (UniqueName: \"kubernetes.io/projected/325af4a3-57e0-4c68-8c7f-153566048283-kube-api-access-j9c42\") pod \"packageserver-d55dfcdfc-g95pt\" (UID: \"325af4a3-57e0-4c68-8c7f-153566048283\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.550047 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84qkq\" (UniqueName: \"kubernetes.io/projected/88b86aaf-78be-4bbb-ad2f-a404029803b3-kube-api-access-84qkq\") pod \"dns-default-z5h6j\" (UID: \"88b86aaf-78be-4bbb-ad2f-a404029803b3\") " pod="openshift-dns/dns-default-z5h6j" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.552494 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:27 crc kubenswrapper[4798]: E1011 03:57:27.553136 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:28.053093967 +0000 UTC m=+143.389383643 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.557651 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-db49p"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.562942 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.576258 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-nptjh"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.579404 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9jbz\" (UniqueName: \"kubernetes.io/projected/3a8f8093-7e4c-450f-8eab-72d11ae9c846-kube-api-access-n9jbz\") pod \"package-server-manager-789f6589d5-jtrv7\" (UID: \"3a8f8093-7e4c-450f-8eab-72d11ae9c846\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.584701 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.585786 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t66m8\" (UniqueName: \"kubernetes.io/projected/cf57b149-4889-439d-aaca-0ea06bbc1a45-kube-api-access-t66m8\") pod \"machine-config-operator-74547568cd-68b7z\" (UID: \"cf57b149-4889-439d-aaca-0ea06bbc1a45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.586852 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5lpj9"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.599291 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-48l5m" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.612656 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.614703 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.618631 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49pc7\" (UniqueName: \"kubernetes.io/projected/6bf5263f-746e-4224-a63b-9a08d32e9e77-kube-api-access-49pc7\") pod \"ingress-canary-rdggm\" (UID: \"6bf5263f-746e-4224-a63b-9a08d32e9e77\") " pod="openshift-ingress-canary/ingress-canary-rdggm" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.623678 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-rdggm" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.628240 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9d7x\" (UniqueName: \"kubernetes.io/projected/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-kube-api-access-m9d7x\") pod \"collect-profiles-29335905-vgmmx\" (UID: \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.632371 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.640979 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vfz2\" (UniqueName: \"kubernetes.io/projected/6c3e154e-469a-47c1-a71a-b09597e9d303-kube-api-access-8vfz2\") pod \"route-controller-manager-6576b87f9c-9c8v5\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.644269 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.650357 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.655776 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-z5h6j" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.656899 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: E1011 03:57:27.657266 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:28.157249227 +0000 UTC m=+143.493538913 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.659586 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-98vrc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.661060 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dz6m8\" (UniqueName: \"kubernetes.io/projected/5626f7eb-4f04-4292-a479-fadf2514fa78-kube-api-access-dz6m8\") pod \"catalog-operator-68c6474976-64mrh\" (UID: \"5626f7eb-4f04-4292-a479-fadf2514fa78\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.665385 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-jsvs2"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.694354 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bdd198b5-674e-4bf8-afb5-15dbd2987ec8-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-cs7dc\" (UID: \"bdd198b5-674e-4bf8-afb5-15dbd2987ec8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.705955 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-qv29k"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.742602 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq"] Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.758623 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:27 crc kubenswrapper[4798]: E1011 03:57:27.759042 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:28.259026834 +0000 UTC m=+143.595316520 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.794899 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x"] Oct 11 03:57:27 crc kubenswrapper[4798]: W1011 03:57:27.847552 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9bb478ba_d274_4a8f_a67d_635740a72f34.slice/crio-a7094b12d377532e4cc490d3a3eb868f11f70d3f82df3d1ad0185133b1d856c4 WatchSource:0}: Error finding container a7094b12d377532e4cc490d3a3eb868f11f70d3f82df3d1ad0185133b1d856c4: Status 404 returned error can't find the container with id a7094b12d377532e4cc490d3a3eb868f11f70d3f82df3d1ad0185133b1d856c4 Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.853670 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.861577 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.861945 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7" Oct 11 03:57:27 crc kubenswrapper[4798]: E1011 03:57:27.862558 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:28.362544304 +0000 UTC m=+143.698833990 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.868842 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.877775 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.898025 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.907061 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:27 crc kubenswrapper[4798]: I1011 03:57:27.962729 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:27 crc kubenswrapper[4798]: E1011 03:57:27.963215 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:28.463194428 +0000 UTC m=+143.799484114 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:27 crc kubenswrapper[4798]: W1011 03:57:27.982945 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod77b085fa_7b0d_45e0_9dec_5adc0eeecb61.slice/crio-b61bba19402e27c7f35a407045e06807597c7d7453a6963d189ed83abb5b175f WatchSource:0}: Error finding container b61bba19402e27c7f35a407045e06807597c7d7453a6963d189ed83abb5b175f: Status 404 returned error can't find the container with id b61bba19402e27c7f35a407045e06807597c7d7453a6963d189ed83abb5b175f Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.066453 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:28 crc kubenswrapper[4798]: E1011 03:57:28.067024 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:28.567001067 +0000 UTC m=+143.903290753 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.167919 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:28 crc kubenswrapper[4798]: E1011 03:57:28.168185 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:28.668143716 +0000 UTC m=+144.004433392 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.168590 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:28 crc kubenswrapper[4798]: E1011 03:57:28.168949 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:28.668942339 +0000 UTC m=+144.005232025 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.274086 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:28 crc kubenswrapper[4798]: E1011 03:57:28.275014 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:28.774997596 +0000 UTC m=+144.111287282 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.321338 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" event={"ID":"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0","Type":"ContainerStarted","Data":"62f341ff2447e88abd9e9831237fc4468b859dc9e9550b7d3b27356a9fa8b3d7"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.324610 4798 generic.go:334] "Generic (PLEG): container finished" podID="467fda03-7ce6-41c9-967c-6e782bd68f90" containerID="18ad16af044876f6395cc95adae8c55efcb638b2317218fd71b1d8315e5fe556" exitCode=0 Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.324672 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" event={"ID":"467fda03-7ce6-41c9-967c-6e782bd68f90","Type":"ContainerDied","Data":"18ad16af044876f6395cc95adae8c55efcb638b2317218fd71b1d8315e5fe556"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.330800 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" event={"ID":"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1","Type":"ContainerStarted","Data":"d4ec035ff74a1dccc3f56595bd34cf75edd44d182c69a447a2c5cb5de92af659"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.330869 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" event={"ID":"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1","Type":"ContainerStarted","Data":"cfa1620c85bab2ff4fd272db0bde74243b60eb28d10aa2f546685772e5df3a9b"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.332041 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-kgrgq" 
event={"ID":"78405f10-b1f1-4401-8a3e-579ff5a739e1","Type":"ContainerStarted","Data":"164dbf9846c92303e57bcdf0e778e84e93688246c185f83f1e0664248a0a41af"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.334190 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x" event={"ID":"77b085fa-7b0d-45e0-9dec-5adc0eeecb61","Type":"ContainerStarted","Data":"b61bba19402e27c7f35a407045e06807597c7d7453a6963d189ed83abb5b175f"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.339338 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" event={"ID":"46ccafd1-b791-4e38-8d42-13aebe2909b2","Type":"ContainerStarted","Data":"51b80c961358771dfea635e4833a9d1d4630bbcc6533cac7a52e6746cc60fd59"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.341572 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-qv29k" event={"ID":"b193848f-5963-432c-964b-460721f701c3","Type":"ContainerStarted","Data":"0dfe79c1713d541b9f38e8aa711b4d7bdb0668383e60f9e22069797f807e5f53"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.344958 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5lpj9" event={"ID":"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a","Type":"ContainerStarted","Data":"3662d386db9616cb0b579dfa8696f618885216078f390a259ef21f8c31d5f5a4"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.347293 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-jsvs2" event={"ID":"056140e0-6e96-41d3-8d80-bd9e33e56d8b","Type":"ContainerStarted","Data":"b2a9e3fc9b273bacfaec1d33962b728d551c09758520b918b8bc406d52b341fd"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.351009 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq" event={"ID":"9bb478ba-d274-4a8f-a67d-635740a72f34","Type":"ContainerStarted","Data":"a7094b12d377532e4cc490d3a3eb868f11f70d3f82df3d1ad0185133b1d856c4"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.352016 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr" event={"ID":"e2211eec-aa3e-429f-9237-fc13fad065ed","Type":"ContainerStarted","Data":"ce45fa46db5f6b986f3f0fb276a0edc26be5884c9f283cf7bc0f890cb920ae7f"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.353035 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-kw978" event={"ID":"f9bbcb27-587d-4994-8ef7-25b9ac6bf912","Type":"ContainerStarted","Data":"37dc8b9107c92444751e9f9b63d4b2a35cbe29d756d1646ee83c8f63f3bd1580"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.353973 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6" event={"ID":"03cf12f7-f45c-46d4-a089-97618c3865b2","Type":"ContainerStarted","Data":"555b518ebeebbf65a4192cff8e4a2300d88e6eb21db1fa0d0a1ed8d9e9ce8949"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.357345 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" 
event={"ID":"6c3ed6b4-3ffd-462d-b465-f509ef85858b","Type":"ContainerStarted","Data":"191ac4a60825b879bea578ed8b10b30cad3c0f7a451a2a314a7e9c7f5a62b0f3"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.357383 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" event={"ID":"6c3ed6b4-3ffd-462d-b465-f509ef85858b","Type":"ContainerStarted","Data":"04434d158f86ea22931cb4ba392e181f626952ddd87965908920cd91a26e142f"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.360801 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" event={"ID":"bf88f9b3-812c-45d1-b8a1-58abe81c9d51","Type":"ContainerStarted","Data":"313d91a5e58c5e7d1447bd6632cd54135752ad0e57a7c93f852a210a84a42263"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.362945 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx" event={"ID":"4ae1d839-823d-4694-9159-b32ea65ca9d5","Type":"ContainerStarted","Data":"83ce4a6e7d6daa195009cb52b4af37192ce44d0333c6d9ef61aa77accfe0b72e"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.362974 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx" event={"ID":"4ae1d839-823d-4694-9159-b32ea65ca9d5","Type":"ContainerStarted","Data":"674bfd7c0e5d5c0ced23aec4ad870e9f25706b6203f0c241bc0b99a2d3cd4527"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.364816 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx" event={"ID":"11f4f0ca-cc1a-4f4e-bdb7-071e41be12ee","Type":"ContainerStarted","Data":"ab7ede9ce5c057696a56df4254e645e0f32af1ee8eb6862206059c5ef501eb63"} Oct 11 03:57:28 crc kubenswrapper[4798]: W1011 03:57:28.365260 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf001bcce_3b27_477b_9ebf_46fdb31d546c.slice/crio-9ab8731d5e75afd0c0b9f2f01e14a63ab5564d1665989d53d490cfbbf53fe00b WatchSource:0}: Error finding container 9ab8731d5e75afd0c0b9f2f01e14a63ab5564d1665989d53d490cfbbf53fe00b: Status 404 returned error can't find the container with id 9ab8731d5e75afd0c0b9f2f01e14a63ab5564d1665989d53d490cfbbf53fe00b Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.369058 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-db49p" event={"ID":"f196154c-0442-4600-967f-74eb36876b52","Type":"ContainerStarted","Data":"8175286bef9ebc0368cf88488ccef15425442b49fea97c5f927c6ef4043a39b7"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.370576 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt" event={"ID":"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5","Type":"ContainerStarted","Data":"8aaa91648c1351542ac7f7d11360e4bbc46570c25710bf28d93a5187cbc065ad"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.376465 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" event={"ID":"d9eedcac-7bb5-4ddf-a856-387e2fb253b1","Type":"ContainerStarted","Data":"f85079db10fc845d82c98835a1b7dc1ff421eb8e6e7f3522aea0c54237713707"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.376522 4798 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" event={"ID":"d9eedcac-7bb5-4ddf-a856-387e2fb253b1","Type":"ContainerStarted","Data":"a0fedc651405a69ca5e1f6bca83f6541ae0c440d0bf5bf742615294613b606c5"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.377632 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.382237 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:28 crc kubenswrapper[4798]: E1011 03:57:28.382703 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:28.882690242 +0000 UTC m=+144.218979928 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.387606 4798 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-4wcbm container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.387685 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" podUID="d9eedcac-7bb5-4ddf-a856-387e2fb253b1" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.419576 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-k8p2d" event={"ID":"9398ffbc-b6a8-4294-83d3-1c80dd716113","Type":"ContainerStarted","Data":"d08b6cc7295e31f7da63863747077a807e6220fe26d43e2febe59ef405d01d30"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.457188 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml"] Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.475966 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs" event={"ID":"806f3022-04e6-4246-a1e3-e9f72066aed3","Type":"ContainerStarted","Data":"3fd253874201efd6f8c18d94b58d7e124665fea05334d94279161580133dfc21"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.476024 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs" 
event={"ID":"806f3022-04e6-4246-a1e3-e9f72066aed3","Type":"ContainerStarted","Data":"17ab670f4cf06d174a1d20818672202181e227c19bd0af9e3f1bcfbb3eb29911"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.476040 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs" event={"ID":"806f3022-04e6-4246-a1e3-e9f72066aed3","Type":"ContainerStarted","Data":"d3e95cb49e60bb7c317625fb0efccebabc4f8645758ee94f374b57ebeb282270"} Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.483378 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:28 crc kubenswrapper[4798]: E1011 03:57:28.486764 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:28.986729417 +0000 UTC m=+144.323019123 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.490515 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-g2k9v"] Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.496581 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xv9zg"] Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.587716 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:28 crc kubenswrapper[4798]: E1011 03:57:28.588109 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:29.088095322 +0000 UTC m=+144.424385008 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.689368 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:28 crc kubenswrapper[4798]: E1011 03:57:28.690840 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:29.190811609 +0000 UTC m=+144.527101295 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.792822 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:28 crc kubenswrapper[4798]: E1011 03:57:28.793560 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:29.293543075 +0000 UTC m=+144.629832761 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.793191 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw"] Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.800017 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-g52t6"] Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.869933 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-rdggm"] Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.904869 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:28 crc kubenswrapper[4798]: E1011 03:57:28.905066 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:29.405032225 +0000 UTC m=+144.741321911 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:28 crc kubenswrapper[4798]: I1011 03:57:28.905502 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:28 crc kubenswrapper[4798]: E1011 03:57:28.906666 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:29.406656014 +0000 UTC m=+144.742945700 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.019050 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:29 crc kubenswrapper[4798]: E1011 03:57:29.019647 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:29.519623849 +0000 UTC m=+144.855913535 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.063282 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-xndjl"] Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.088121 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vb9ln" podStartSLOduration=124.088090873 podStartE2EDuration="2m4.088090873s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:29.069289336 +0000 UTC m=+144.405579022" watchObservedRunningTime="2025-10-11 03:57:29.088090873 +0000 UTC m=+144.424380559" Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.094549 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-z5h6j"] Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.101574 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zrx8s" podStartSLOduration=124.101561558 podStartE2EDuration="2m4.101561558s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:29.100348612 +0000 UTC m=+144.436638298" watchObservedRunningTime="2025-10-11 03:57:29.101561558 +0000 UTC m=+144.437851244" Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.120825 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") 
pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:29 crc kubenswrapper[4798]: E1011 03:57:29.121238 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:29.621223711 +0000 UTC m=+144.957513387 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:29 crc kubenswrapper[4798]: W1011 03:57:29.158660 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod136055f4_c38a_4cb1_8259_fcf1143c0bcf.slice/crio-2c7e095e0a4f4c9b40b87d60638f5444390f0ae079603d831a17edb4a51b5ad6 WatchSource:0}: Error finding container 2c7e095e0a4f4c9b40b87d60638f5444390f0ae079603d831a17edb4a51b5ad6: Status 404 returned error can't find the container with id 2c7e095e0a4f4c9b40b87d60638f5444390f0ae079603d831a17edb4a51b5ad6 Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.159514 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ndqkx" podStartSLOduration=124.159490065 podStartE2EDuration="2m4.159490065s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:29.145128102 +0000 UTC m=+144.481417788" watchObservedRunningTime="2025-10-11 03:57:29.159490065 +0000 UTC m=+144.495779751" Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.171917 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc"] Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.200453 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxxvx" podStartSLOduration=124.200427979 podStartE2EDuration="2m4.200427979s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:29.194681316 +0000 UTC m=+144.530971002" watchObservedRunningTime="2025-10-11 03:57:29.200427979 +0000 UTC m=+144.536717695" Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.201876 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5"] Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.206822 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-dp4kr"] Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.211121 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-dxlvp"] Oct 11 03:57:29 crc kubenswrapper[4798]: 
I1011 03:57:29.212607 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-48l5m"] Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.221627 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:29 crc kubenswrapper[4798]: E1011 03:57:29.223097 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:29.723072321 +0000 UTC m=+145.059362007 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.234882 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt"] Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.263414 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" podStartSLOduration=124.263360596 podStartE2EDuration="2m4.263360596s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:29.220235116 +0000 UTC m=+144.556524802" watchObservedRunningTime="2025-10-11 03:57:29.263360596 +0000 UTC m=+144.599650282" Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.296564 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8jgvs" podStartSLOduration=124.296533555 podStartE2EDuration="2m4.296533555s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:29.261031215 +0000 UTC m=+144.597320901" watchObservedRunningTime="2025-10-11 03:57:29.296533555 +0000 UTC m=+144.632823241" Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.328774 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:29 crc kubenswrapper[4798]: E1011 03:57:29.329986 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-10-11 03:57:29.829970843 +0000 UTC m=+145.166260529 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.434037 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:29 crc kubenswrapper[4798]: E1011 03:57:29.434407 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:29.934358849 +0000 UTC m=+145.270648545 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.434463 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:29 crc kubenswrapper[4798]: E1011 03:57:29.434971 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:29.934963967 +0000 UTC m=+145.271253653 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.476758 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z"] Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.477140 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh"] Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.501732 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-kgrgq" event={"ID":"78405f10-b1f1-4401-8a3e-579ff5a739e1","Type":"ContainerStarted","Data":"2410127de0bdc8a8c3c02038e8bfd61d523afcd8b599e26ce810812306504f27"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.508865 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx"] Oct 11 03:57:29 crc kubenswrapper[4798]: W1011 03:57:29.511222 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5626f7eb_4f04_4292_a479_fadf2514fa78.slice/crio-3799d53b350377809537ae1a33286f10f01c19588953d88d088575d31dd92956 WatchSource:0}: Error finding container 3799d53b350377809537ae1a33286f10f01c19588953d88d088575d31dd92956: Status 404 returned error can't find the container with id 3799d53b350377809537ae1a33286f10f01c19588953d88d088575d31dd92956 Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.511304 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" event={"ID":"76d9d582-6e5f-4146-859a-5474e90dbe64","Type":"ContainerStarted","Data":"79e8218eb70a42bd653e7a527eda36715ad652b158deb2f87fdc7fb176aec160"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.528595 4798 generic.go:334] "Generic (PLEG): container finished" podID="46ccafd1-b791-4e38-8d42-13aebe2909b2" containerID="c5cf7e9991de4bf0a58e31e00ebb54fb57c345ed2e4533c1af1c1136f50877fd" exitCode=0 Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.529175 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" event={"ID":"46ccafd1-b791-4e38-8d42-13aebe2909b2","Type":"ContainerDied","Data":"c5cf7e9991de4bf0a58e31e00ebb54fb57c345ed2e4533c1af1c1136f50877fd"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.535900 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:29 crc kubenswrapper[4798]: E1011 03:57:29.536154 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-10-11 03:57:30.036123857 +0000 UTC m=+145.372413543 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.536236 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:29 crc kubenswrapper[4798]: E1011 03:57:29.536699 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:30.036682633 +0000 UTC m=+145.372972319 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.536706 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" event={"ID":"0026b84d-ad3a-41e1-bcd3-9a67f7d64ea0","Type":"ContainerStarted","Data":"1c2e3035ea9dbe81930568079877c1ac89c7648e1e6110186aac3b9fc6cbd478"} Oct 11 03:57:29 crc kubenswrapper[4798]: W1011 03:57:29.543511 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf57b149_4889_439d_aaca_0ea06bbc1a45.slice/crio-8a9c7f3d58510ab67af044b277da766e5654e81b582339c97d075d6958d9ff16 WatchSource:0}: Error finding container 8a9c7f3d58510ab67af044b277da766e5654e81b582339c97d075d6958d9ff16: Status 404 returned error can't find the container with id 8a9c7f3d58510ab67af044b277da766e5654e81b582339c97d075d6958d9ff16 Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.546580 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-kw978" event={"ID":"f9bbcb27-587d-4994-8ef7-25b9ac6bf912","Type":"ContainerStarted","Data":"34e4dfcfab7e1a16222438fd643cdf2bdab51a95eac1e5a2d75d5c4e5139b403"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.547331 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.550822 4798 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-kw978 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: 
connect: connection refused" start-of-body= Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.550877 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-kw978" podUID="f9bbcb27-587d-4994-8ef7-25b9ac6bf912" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.554458 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" event={"ID":"136055f4-c38a-4cb1-8259-fcf1143c0bcf","Type":"ContainerStarted","Data":"2c7e095e0a4f4c9b40b87d60638f5444390f0ae079603d831a17edb4a51b5ad6"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.562237 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" event={"ID":"972678a7-921f-4581-a5d4-8fcc57387afc","Type":"ContainerStarted","Data":"cc3133d48b16503d9071e8c63814b25c5c1f3c4698c8e6665b7478d73102e22e"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.568292 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-98vrc" event={"ID":"f001bcce-3b27-477b-9ebf-46fdb31d546c","Type":"ContainerStarted","Data":"7f2456d6b824545557f7c55cae611a419e89e41072d40b1623e65f5b832cdd8e"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.568361 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-98vrc" event={"ID":"f001bcce-3b27-477b-9ebf-46fdb31d546c","Type":"ContainerStarted","Data":"9ab8731d5e75afd0c0b9f2f01e14a63ab5564d1665989d53d490cfbbf53fe00b"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.582095 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-rdggm" event={"ID":"6bf5263f-746e-4224-a63b-9a08d32e9e77","Type":"ContainerStarted","Data":"99523b1b947671257274e2a2d47787d32ceb26dbf7f32cc30cdd1328a7ef639a"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.590049 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt" event={"ID":"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5","Type":"ContainerStarted","Data":"5bd08c57e86b8be186f17c499259b982c5de791b9317e89d3f47bf6565667191"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.590985 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" event={"ID":"6c3e154e-469a-47c1-a71a-b09597e9d303","Type":"ContainerStarted","Data":"a1c368421eddc8056650b3ea0e64d005299e6107d93dd6b8208388afc635b757"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.592065 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6" event={"ID":"03cf12f7-f45c-46d4-a089-97618c3865b2","Type":"ContainerStarted","Data":"93bd9f9e1a9d5353a585b3c424747ec9ff7ead288bf4721d0aff0bd0afc62aea"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.593666 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6" Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.596499 4798 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-rxtt6 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe 
status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.596552 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6" podUID="03cf12f7-f45c-46d4-a089-97618c3865b2" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.602757 4798 generic.go:334] "Generic (PLEG): container finished" podID="f196154c-0442-4600-967f-74eb36876b52" containerID="c887a9bd032fe202bb651ba476248aa4e403590f86b4835c2b2010c59ce13c25" exitCode=0 Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.602899 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-db49p" event={"ID":"f196154c-0442-4600-967f-74eb36876b52","Type":"ContainerDied","Data":"c887a9bd032fe202bb651ba476248aa4e403590f86b4835c2b2010c59ce13c25"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.605077 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7"] Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.606111 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-g52t6" event={"ID":"7965ee7e-8332-46c1-aff0-c2d96ea976dc","Type":"ContainerStarted","Data":"d52e1949414a6da5f7a51f75e85bd2faff414d1cebc734079ae4a91d2ea463db"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.607275 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-g2k9v" event={"ID":"55408d92-301d-4b54-9cb1-09dc11423c33","Type":"ContainerStarted","Data":"3bc3143ddc4f778eaedc2ef05ebc8685416f2fdd273b40182503bbc7b62a1c0d"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.614951 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" event={"ID":"29c4d9ea-078d-4ed3-a56a-c0a29887b6a1","Type":"ContainerStarted","Data":"e1c3824d84ddc076e2225c4b28a1d1b6334d8282303c857be5de85c8cf4e6e81"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.634905 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" event={"ID":"325af4a3-57e0-4c68-8c7f-153566048283","Type":"ContainerStarted","Data":"48f03c46c5cd683b8ee95246fcacb67e1324ff3e3f47ce262a9088fe961af901"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.642333 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq" event={"ID":"9bb478ba-d274-4a8f-a67d-635740a72f34","Type":"ContainerStarted","Data":"30c193bf06e97b874c509d49a942030120b24764c0da729b224db6b9d0b83e99"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.645013 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:29 crc kubenswrapper[4798]: E1011 03:57:29.646691 4798 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:30.146670889 +0000 UTC m=+145.482960575 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.685335 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" event={"ID":"467fda03-7ce6-41c9-967c-6e782bd68f90","Type":"ContainerStarted","Data":"56dd8316d52614bd7c0913751299b7399ac2e186e49479233f9dfb1e5906efcb"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.746774 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.746839 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml" event={"ID":"f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88","Type":"ContainerStarted","Data":"b078b2b31b8a65209577aea5696af876ca6967735bbe41ce2a982e1db65625dc"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.746897 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml" event={"ID":"f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88","Type":"ContainerStarted","Data":"4df67a3af451ad4f7ffcb37c85eb242fb651661c88fdfcbc6c6ec197ac285508"} Oct 11 03:57:29 crc kubenswrapper[4798]: E1011 03:57:29.748287 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:30.24826945 +0000 UTC m=+145.584559136 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.808804 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-qv29k" event={"ID":"b193848f-5963-432c-964b-460721f701c3","Type":"ContainerStarted","Data":"d6770e55889394e3f5009bde0b56e0bc6ce9abd171f8f3d4047eed31034db7d0"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.814990 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-k8p2d" event={"ID":"9398ffbc-b6a8-4294-83d3-1c80dd716113","Type":"ContainerStarted","Data":"c0f685269f07b6ee3eea80a5e6f4908f546834195b056e14aac2d23b501cb06c"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.822462 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-k8p2d" Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.838937 4798 patch_prober.go:28] interesting pod/console-operator-58897d9998-k8p2d container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.839016 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-k8p2d" podUID="9398ffbc-b6a8-4294-83d3-1c80dd716113" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.841595 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr" event={"ID":"e2211eec-aa3e-429f-9237-fc13fad065ed","Type":"ContainerStarted","Data":"437c0e534c5353bd329c6f7fd4a1bab46ec325e9690c47d88bb064e27fe02e39"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.848314 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:29 crc kubenswrapper[4798]: E1011 03:57:29.849727 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:30.349702428 +0000 UTC m=+145.685992114 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.854822 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" event={"ID":"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11","Type":"ContainerStarted","Data":"add44596eb5fd6eabe1604330e45e7b9ffff5d3e08166f6b9d2e68b53ad16db5"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.868642 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw" event={"ID":"550d113c-cbae-420d-a970-3a266a94134c","Type":"ContainerStarted","Data":"5e3a9d7e15adc5546c50d63a64e1d2d2b7b5abe6c8f7b564c6d84b4758203a72"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.888421 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-z5h6j" event={"ID":"88b86aaf-78be-4bbb-ad2f-a404029803b3","Type":"ContainerStarted","Data":"759c428870e66a4657fe609ae72f7036ac25700a29265ec010d8d6c5a8572267"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.909529 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5lpj9" event={"ID":"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a","Type":"ContainerStarted","Data":"fc7e27c5c34b193a1868b23b9ec01cb75ad09036a180fff1fe437da649f98ba6"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.916823 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-jsvs2" event={"ID":"056140e0-6e96-41d3-8d80-bd9e33e56d8b","Type":"ContainerStarted","Data":"c7750003d9d3629d0db5107842d43d339b55988113d29c20a44ff42031e30906"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.939249 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x" event={"ID":"77b085fa-7b0d-45e0-9dec-5adc0eeecb61","Type":"ContainerStarted","Data":"5acdc7b4445a191c1f720b6d49eebcae3be337928fad8a2433077f42f8505d3e"} Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.958967 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:29 crc kubenswrapper[4798]: E1011 03:57:29.959498 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:30.459479527 +0000 UTC m=+145.795769213 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.959818 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm"
Oct 11 03:57:29 crc kubenswrapper[4798]: I1011 03:57:29.980920 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-nptjh" podStartSLOduration=124.980894012 podStartE2EDuration="2m4.980894012s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:29.979914723 +0000 UTC m=+145.316204409" watchObservedRunningTime="2025-10-11 03:57:29.980894012 +0000 UTC m=+145.317183698"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.033102 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" podStartSLOduration=125.033069585 podStartE2EDuration="2m5.033069585s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:30.020939419 +0000 UTC m=+145.357229105" watchObservedRunningTime="2025-10-11 03:57:30.033069585 +0000 UTC m=+145.369359281"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.067637 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:30 crc kubenswrapper[4798]: E1011 03:57:30.074747 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:30.574712699 +0000 UTC m=+145.911002385 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.121275 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-kgrgq" podStartSLOduration=125.121246612 podStartE2EDuration="2m5.121246612s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:30.075931206 +0000 UTC m=+145.412220892" watchObservedRunningTime="2025-10-11 03:57:30.121246612 +0000 UTC m=+145.457536298"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.121650 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-wfsmq" podStartSLOduration=125.121645215 podStartE2EDuration="2m5.121645215s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:30.120904182 +0000 UTC m=+145.457193868" watchObservedRunningTime="2025-10-11 03:57:30.121645215 +0000 UTC m=+145.457934901"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.179340 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:30 crc kubenswrapper[4798]: E1011 03:57:30.179863 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:30.679847738 +0000 UTC m=+146.016137424 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.290888 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:30 crc kubenswrapper[4798]: E1011 03:57:30.291487 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:30.791467963 +0000 UTC m=+146.127757659 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.396671 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:30 crc kubenswrapper[4798]: E1011 03:57:30.397500 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:30.897485968 +0000 UTC m=+146.233775654 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.422143 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5bh8x" podStartSLOduration=125.42212257 podStartE2EDuration="2m5.42212257s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:30.163312601 +0000 UTC m=+145.499602287" watchObservedRunningTime="2025-10-11 03:57:30.42212257 +0000 UTC m=+145.758412256"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.458683 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-kgrgq"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.472070 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 03:57:30 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld
Oct 11 03:57:30 crc kubenswrapper[4798]: [+]process-running ok
Oct 11 03:57:30 crc kubenswrapper[4798]: healthz check failed
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.472133 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.490930 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-kw978" podStartSLOduration=125.490911174 podStartE2EDuration="2m5.490911174s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:30.436763502 +0000 UTC m=+145.773053188" watchObservedRunningTime="2025-10-11 03:57:30.490911174 +0000 UTC m=+145.827200860"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.499518 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:30 crc kubenswrapper[4798]: E1011 03:57:30.500040 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:31.000017748 +0000 UTC m=+146.336307434 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.553726 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jspml" podStartSLOduration=125.553696726 podStartE2EDuration="2m5.553696726s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:30.492240764 +0000 UTC m=+145.828530450" watchObservedRunningTime="2025-10-11 03:57:30.553696726 +0000 UTC m=+145.889986412"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.554024 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-wqjfc" podStartSLOduration=125.554009456 podStartE2EDuration="2m5.554009456s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:30.542284412 +0000 UTC m=+145.878574098" watchObservedRunningTime="2025-10-11 03:57:30.554009456 +0000 UTC m=+145.890299142"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.586293 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mdkrr" podStartSLOduration=125.586259628 podStartE2EDuration="2m5.586259628s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:30.581513485 +0000 UTC m=+145.917803181" watchObservedRunningTime="2025-10-11 03:57:30.586259628 +0000 UTC m=+145.922549304"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.608152 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:30 crc kubenswrapper[4798]: E1011 03:57:30.609114 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:31.109091596 +0000 UTC m=+146.445381282 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.627558 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6" podStartSLOduration=125.627538482 podStartE2EDuration="2m5.627538482s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:30.625024456 +0000 UTC m=+145.961314142" watchObservedRunningTime="2025-10-11 03:57:30.627538482 +0000 UTC m=+145.963828168"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.716296 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:30 crc kubenswrapper[4798]: E1011 03:57:30.716682 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:31.216665529 +0000 UTC m=+146.552955215 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.724635 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-5lpj9" podStartSLOduration=125.724609007 podStartE2EDuration="2m5.724609007s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:30.722920117 +0000 UTC m=+146.059209803" watchObservedRunningTime="2025-10-11 03:57:30.724609007 +0000 UTC m=+146.060898703"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.819131 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:30 crc kubenswrapper[4798]: E1011 03:57:30.820030 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:31.320015684 +0000 UTC m=+146.656305370 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.842926 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-k8p2d" podStartSLOduration=125.842893143 podStartE2EDuration="2m5.842893143s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:30.83383867 +0000 UTC m=+146.170128356" watchObservedRunningTime="2025-10-11 03:57:30.842893143 +0000 UTC m=+146.179182829"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.885577 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-98vrc" podStartSLOduration=6.885551378 podStartE2EDuration="6.885551378s" podCreationTimestamp="2025-10-11 03:57:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:30.883474946 +0000 UTC m=+146.219764642" watchObservedRunningTime="2025-10-11 03:57:30.885551378 +0000 UTC m=+146.221841064"
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.922219 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:30 crc kubenswrapper[4798]: E1011 03:57:30.922754 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:31.422717649 +0000 UTC m=+146.759007465 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:30 crc kubenswrapper[4798]: I1011 03:57:30.973603 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc" event={"ID":"bdd198b5-674e-4bf8-afb5-15dbd2987ec8","Type":"ContainerStarted","Data":"234843eb4473255e800fd4cf06f50a278b3ab14a076357d0b3f88597d0c2934c"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.028423 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:31 crc kubenswrapper[4798]: E1011 03:57:31.028847 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:31.528832427 +0000 UTC m=+146.865122113 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.042127 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" event={"ID":"cf57b149-4889-439d-aaca-0ea06bbc1a45","Type":"ContainerStarted","Data":"485368e5b4ac836a95170c75c8b4f18528d891405d3289c11630edffcd9a1511"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.042182 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" event={"ID":"cf57b149-4889-439d-aaca-0ea06bbc1a45","Type":"ContainerStarted","Data":"8a9c7f3d58510ab67af044b277da766e5654e81b582339c97d075d6958d9ff16"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.076652 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" event={"ID":"136055f4-c38a-4cb1-8259-fcf1143c0bcf","Type":"ContainerStarted","Data":"bebb4fad3104249b6d0bab82d5d9a92dedbe9fb9e22a03c24b7aa91c145f06f2"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.126361 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-g2k9v" event={"ID":"55408d92-301d-4b54-9cb1-09dc11423c33","Type":"ContainerStarted","Data":"1076188bd0244c0fdd8f91ed7ae3e776e26f70d8fb31d6f3897230cb6ccdbe19"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.128292 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-g2k9v"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.129768 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:31 crc kubenswrapper[4798]: E1011 03:57:31.131352 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:31.631319016 +0000 UTC m=+146.967608702 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.139563 4798 patch_prober.go:28] interesting pod/downloads-7954f5f757-g2k9v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.139632 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-g2k9v" podUID="55408d92-301d-4b54-9cb1-09dc11423c33" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.174737 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xndjl" podStartSLOduration=126.174714474 podStartE2EDuration="2m6.174714474s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:31.112599102 +0000 UTC m=+146.448888788" watchObservedRunningTime="2025-10-11 03:57:31.174714474 +0000 UTC m=+146.511004150"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.175714 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-g2k9v" podStartSLOduration=126.175707874 podStartE2EDuration="2m6.175707874s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:31.174960641 +0000 UTC m=+146.511250327" watchObservedRunningTime="2025-10-11 03:57:31.175707874 +0000 UTC m=+146.511997560"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.177866 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" event={"ID":"46ccafd1-b791-4e38-8d42-13aebe2909b2","Type":"ContainerStarted","Data":"895c6488c233b8563e5c2f7a01824b98783c8754177fa22fce3da33fcbe058e6"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.178001 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.228706 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" podStartSLOduration=126.2286855 podStartE2EDuration="2m6.2286855s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:31.227791374 +0000 UTC m=+146.564081060" watchObservedRunningTime="2025-10-11 03:57:31.2286855 +0000 UTC m=+146.564975186"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.236362 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:31 crc kubenswrapper[4798]: E1011 03:57:31.236858 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:31.736839996 +0000 UTC m=+147.073129682 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.251114 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" event={"ID":"5626f7eb-4f04-4292-a479-fadf2514fa78","Type":"ContainerStarted","Data":"b4616a89aa5ca998dcbafc3db00fd117bd06f46147232b8caca1fd3eed622f2d"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.251169 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" event={"ID":"5626f7eb-4f04-4292-a479-fadf2514fa78","Type":"ContainerStarted","Data":"3799d53b350377809537ae1a33286f10f01c19588953d88d088575d31dd92956"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.255472 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.258899 4798 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-64mrh container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body=
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.258938 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" podUID="5626f7eb-4f04-4292-a479-fadf2514fa78" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.266265 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7" event={"ID":"3a8f8093-7e4c-450f-8eab-72d11ae9c846","Type":"ContainerStarted","Data":"37f5b46441cce6ea070c374cfb2fbcddac7a06c5128a941673b5ca9142d96a38"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.266328 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7" event={"ID":"3a8f8093-7e4c-450f-8eab-72d11ae9c846","Type":"ContainerStarted","Data":"45e6d69cd6dd02ea638c0d22c69ed60f75342634242897c0e3119f253c3d5529"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.278218 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" podStartSLOduration=126.27819946299999 podStartE2EDuration="2m6.278199463s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:31.278046629 +0000 UTC m=+146.614336315" watchObservedRunningTime="2025-10-11 03:57:31.278199463 +0000 UTC m=+146.614489149"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.298441 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw" event={"ID":"550d113c-cbae-420d-a970-3a266a94134c","Type":"ContainerStarted","Data":"440abfa7fd6f15c37e24602f6d90308366297c651e4bbae10d8d6d7405552a6d"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.326731 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw" podStartSLOduration=126.326707885 podStartE2EDuration="2m6.326707885s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:31.325508039 +0000 UTC m=+146.661797725" watchObservedRunningTime="2025-10-11 03:57:31.326707885 +0000 UTC m=+146.662997571"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.339111 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:31 crc kubenswrapper[4798]: E1011 03:57:31.340302 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:31.840284434 +0000 UTC m=+147.176574120 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.341708 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" event={"ID":"325af4a3-57e0-4c68-8c7f-153566048283","Type":"ContainerStarted","Data":"8eecfecb339284d62cd90fa092d18840f2a3f9b11856ebbfbfab64ce84c89a27"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.342487 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.347556 4798 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-g95pt container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused" start-of-body=
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.347627 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" podUID="325af4a3-57e0-4c68-8c7f-153566048283" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.388207 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-48l5m" event={"ID":"39e1644a-d213-4689-b241-667a5c3e0c21","Type":"ContainerStarted","Data":"50f1630638fa1f73fa82ee0aa4c2a5081cd346e9ecaccc528da46aedcdc6173d"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.388261 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-48l5m" event={"ID":"39e1644a-d213-4689-b241-667a5c3e0c21","Type":"ContainerStarted","Data":"e6e04445cb5d8a1de4d04248e8cd6d898231b54c030f270c9e0aab62a3bc2f42"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.418064 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-rdggm" event={"ID":"6bf5263f-746e-4224-a63b-9a08d32e9e77","Type":"ContainerStarted","Data":"dffb53e5fe12e429a7ffa8f6fe1cd3814da8071bca19e11e2442a7426ebacfd8"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.442621 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:31 crc kubenswrapper[4798]: E1011 03:57:31.442991 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:31.942977999 +0000 UTC m=+147.279267685 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.471935 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-jsvs2" event={"ID":"056140e0-6e96-41d3-8d80-bd9e33e56d8b","Type":"ContainerStarted","Data":"1169be6a43f1fee80de795e7b63c1446459682a44296faca32b42438d1d0db4e"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.473996 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 03:57:31 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld
Oct 11 03:57:31 crc kubenswrapper[4798]: [+]process-running ok
Oct 11 03:57:31 crc kubenswrapper[4798]: healthz check failed
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.474081 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.496771 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-rdggm" podStartSLOduration=7.49674129 podStartE2EDuration="7.49674129s" podCreationTimestamp="2025-10-11 03:57:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:31.481973655 +0000 UTC m=+146.818263341" watchObservedRunningTime="2025-10-11 03:57:31.49674129 +0000 UTC m=+146.833030976"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.497575 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" podStartSLOduration=126.497570185 podStartE2EDuration="2m6.497570185s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:31.432248746 +0000 UTC m=+146.768538432" watchObservedRunningTime="2025-10-11 03:57:31.497570185 +0000 UTC m=+146.833859871"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.517600 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-g52t6" event={"ID":"7965ee7e-8332-46c1-aff0-c2d96ea976dc","Type":"ContainerStarted","Data":"73a52258b27edef6227f0c1b88204fe0cdf9a90b410330267f52026866a6abd9"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.554955 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:31 crc kubenswrapper[4798]: E1011 03:57:31.560768 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:32.060732509 +0000 UTC m=+147.397022195 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.562160 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.574790 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:31 crc kubenswrapper[4798]: E1011 03:57:31.575515 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:32.075490313 +0000 UTC m=+147.411779999 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.577329 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.586126 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" event={"ID":"76d9d582-6e5f-4146-859a-5474e90dbe64","Type":"ContainerStarted","Data":"767ca9c454d4a6ff39b899eee906a98abe71c6b7a12a6de53d3d6fe323a85f4e"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.638137 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-48l5m" podStartSLOduration=126.638111481 podStartE2EDuration="2m6.638111481s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:31.564661037 +0000 UTC m=+146.900950723" watchObservedRunningTime="2025-10-11 03:57:31.638111481 +0000 UTC m=+146.974401167"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.656492 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-jsvs2" podStartSLOduration=126.656466564 podStartE2EDuration="2m6.656466564s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:31.636161282 +0000 UTC m=+146.972450968" watchObservedRunningTime="2025-10-11 03:57:31.656466564 +0000 UTC m=+146.992756250"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.676134 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.676550 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" event={"ID":"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11","Type":"ContainerStarted","Data":"403233cb4d6cc930947779dbde5efdf608eae4e2004a4936d2f878d2794daf7e"}
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.689165 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr"
Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.696626 4798 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-dp4kr container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body=
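The "connect: connection refused" readiness failures around this point all follow one pattern: PLEG reports ContainerStarted, the first probe fires before the server inside the container has bound its port, and the probe fails until a later attempt succeeds and SyncLoop reports status="ready". A kubelet HTTP probe is essentially a GET that treats any status from 200 through 399 as success; a minimal sketch of that check (the http_probe helper is hypothetical; the URL is taken from the marketplace-operator entry above):

import urllib.error
import urllib.request

def http_probe(url: str, timeout: float = 1.0) -> tuple[bool, str]:
    """Perform one kubelet-style HTTP probe: 200-399 counts as success."""
    try:
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            return 200 <= resp.status < 400, f"status {resp.status}"
    except urllib.error.HTTPError as e:
        # Non-2xx responses arrive as HTTPError but still carry a status code.
        return 200 <= e.code < 400, f"status {e.code}"
    except OSError as e:
        # Covers URLError, including "connection refused" seen in the log.
        return False, str(e)

ok, detail = http_probe("http://10.217.0.28:8080/healthz")
print("ready" if ok else "not ready", "-", detail)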
probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" podUID="45c5e131-b2e8-4a3f-8c26-0dfe5cacee11" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" Oct 11 03:57:31 crc kubenswrapper[4798]: E1011 03:57:31.703580 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:32.203547334 +0000 UTC m=+147.539837020 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.716152 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt" event={"ID":"687b3ca2-3a7f-4d1d-be5b-8c3dfe9082e5","Type":"ContainerStarted","Data":"34f440d652395b8c89cdc1bcae17f29728d1521e7cb27b7982c80fdcdeecb8a3"} Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.765655 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" event={"ID":"6c3e154e-469a-47c1-a71a-b09597e9d303","Type":"ContainerStarted","Data":"6f463880f08e73f586ff610569f6062c31341756428da3ce223ed9836f2d304a"} Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.766635 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.791344 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-g52t6" podStartSLOduration=126.791323259 podStartE2EDuration="2m6.791323259s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:31.738862638 +0000 UTC m=+147.075152324" watchObservedRunningTime="2025-10-11 03:57:31.791323259 +0000 UTC m=+147.127612945" Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.793777 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-z5h6j" event={"ID":"88b86aaf-78be-4bbb-ad2f-a404029803b3","Type":"ContainerStarted","Data":"57001381a7924790ff748173f8017598ec42e01b845ef3c2e14384d0c01c49ef"} Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.797037 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:31 crc kubenswrapper[4798]: E1011 03:57:31.799866 4798 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:32.299846585 +0000 UTC m=+147.636136271 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.838144 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" event={"ID":"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd","Type":"ContainerStarted","Data":"17bf07656456c1c694e9e48b11a6ddcdbe0f2b027a8abf029615aff20323b4bd"} Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.838210 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" event={"ID":"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd","Type":"ContainerStarted","Data":"640248fd85837c6ebee9921630a6236f6d80d14758eba9da4879f8d0cd1e2835"} Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.874982 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.875315 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9wgt" podStartSLOduration=126.875295949 podStartE2EDuration="2m6.875295949s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:31.874344401 +0000 UTC m=+147.210634087" watchObservedRunningTime="2025-10-11 03:57:31.875295949 +0000 UTC m=+147.211585635" Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.877293 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" podStartSLOduration=126.87728755 podStartE2EDuration="2m6.87728755s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:31.799923168 +0000 UTC m=+147.136212854" watchObservedRunningTime="2025-10-11 03:57:31.87728755 +0000 UTC m=+147.213577236" Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.880715 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-k8p2d" Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.900042 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:31 crc kubenswrapper[4798]: E1011 03:57:31.900186 4798 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:32.400142149 +0000 UTC m=+147.736431835 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.901592 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:31 crc kubenswrapper[4798]: E1011 03:57:31.923014 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:32.422986097 +0000 UTC m=+147.759275783 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:31 crc kubenswrapper[4798]: I1011 03:57:31.931857 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rxtt6" Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.004110 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:32 crc kubenswrapper[4798]: E1011 03:57:32.005066 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:32.50502933 +0000 UTC m=+147.841319016 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.028505 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-xv9zg" podStartSLOduration=127.028484187 podStartE2EDuration="2m7.028484187s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:31.959870939 +0000 UTC m=+147.296160625" watchObservedRunningTime="2025-10-11 03:57:32.028484187 +0000 UTC m=+147.364773883" Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.107382 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:32 crc kubenswrapper[4798]: E1011 03:57:32.107857 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:32.607836418 +0000 UTC m=+147.944126104 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.209182 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:32 crc kubenswrapper[4798]: E1011 03:57:32.209503 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:32.709483932 +0000 UTC m=+148.045773618 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.287535 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" podStartSLOduration=127.287513404 podStartE2EDuration="2m7.287513404s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:32.287317469 +0000 UTC m=+147.623607155" watchObservedRunningTime="2025-10-11 03:57:32.287513404 +0000 UTC m=+147.623803080" Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.310149 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:32 crc kubenswrapper[4798]: E1011 03:57:32.310595 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:32.810574539 +0000 UTC m=+148.146864215 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.317938 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" podStartSLOduration=127.3179131 podStartE2EDuration="2m7.3179131s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:32.316962022 +0000 UTC m=+147.653251708" watchObservedRunningTime="2025-10-11 03:57:32.3179131 +0000 UTC m=+147.654202786" Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.410904 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:32 crc kubenswrapper[4798]: E1011 03:57:32.411094 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:32.911060658 +0000 UTC m=+148.247350344 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.411345 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:32 crc kubenswrapper[4798]: E1011 03:57:32.411839 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:32.91182909 +0000 UTC m=+148.248118776 (durationBeforeRetry 500ms). 
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.458004 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br"
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.462132 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 03:57:32 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld
Oct 11 03:57:32 crc kubenswrapper[4798]: [+]process-running ok
Oct 11 03:57:32 crc kubenswrapper[4798]: healthz check failed
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.462226 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.512497 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:32 crc kubenswrapper[4798]: E1011 03:57:32.512642 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.012618568 +0000 UTC m=+148.348908254 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.513070 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:32 crc kubenswrapper[4798]: E1011 03:57:32.513667 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.013631439 +0000 UTC m=+148.349921125 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.614113 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:32 crc kubenswrapper[4798]: E1011 03:57:32.614631 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.114603922 +0000 UTC m=+148.450893608 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.627140 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5"
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.716136 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:32 crc kubenswrapper[4798]: E1011 03:57:32.716680 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.216654808 +0000 UTC m=+148.552944494 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.817451 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:32 crc kubenswrapper[4798]: E1011 03:57:32.817712 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.317674713 +0000 UTC m=+148.653964399 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.818270 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:32 crc kubenswrapper[4798]: E1011 03:57:32.818808 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.318799337 +0000 UTC m=+148.655089023 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
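Each failed operation above is parked with "No retries permitted until ... (durationBeforeRetry 500ms)". In this capture the delay reads 500ms on every attempt; in general the kubelet's nested pending operations back off exponentially from an initial delay up to a cap. A sketch of that general mechanism, where the 500ms start, factor of 2, and 2m cap are assumptions for illustration rather than values read from this log:

package main

import (
	"fmt"
	"time"
)

func main() {
	const factor = 2
	initial := 500 * time.Millisecond
	maxDelay := 2 * time.Minute

	d := initial
	for attempt := 1; attempt <= 5; attempt++ {
		// Each failure re-arms the gate: no retries permitted for d.
		fmt.Printf("attempt %d failed: no retries permitted for %v\n", attempt, d)
		if next := d * factor; next < maxDelay {
			d = next
		} else {
			d = maxDelay
		}
	}
}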
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.859866 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-qv29k" event={"ID":"b193848f-5963-432c-964b-460721f701c3","Type":"ContainerStarted","Data":"15a9de9a2d7075cb4ce918cfafe45b7d23d0b71af88b9f02dbb253679ffc8f4f"}
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.864050 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-z5h6j" event={"ID":"88b86aaf-78be-4bbb-ad2f-a404029803b3","Type":"ContainerStarted","Data":"253b17d5212efc83c992c963f738085bc53423e6fafdd6fb91aa79d9c38b731d"}
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.864852 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-z5h6j"
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.867874 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc" event={"ID":"bdd198b5-674e-4bf8-afb5-15dbd2987ec8","Type":"ContainerStarted","Data":"ec27c9c41fd1d1c0bb8d7e165f704b159c530497ea2e21296656b478a1e8be36"}
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.890455 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" event={"ID":"972678a7-921f-4581-a5d4-8fcc57387afc","Type":"ContainerStarted","Data":"d575594f7c8aa702c110ffd9e1381271e2814ed0130ad584f95cb8c52f4b4125"}
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.890541 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" event={"ID":"972678a7-921f-4581-a5d4-8fcc57387afc","Type":"ContainerStarted","Data":"83f98e687b5333195dec5ee2749d00327d0e617597d3a8750faf7b20130d8b55"}
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.897702 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" event={"ID":"cf57b149-4889-439d-aaca-0ea06bbc1a45","Type":"ContainerStarted","Data":"d113403e196d871b24f9887abd22ffad759bf8ec6706d5122a542cd5869e1a46"}
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.919163 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:32 crc kubenswrapper[4798]: E1011 03:57:32.920551 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.420521783 +0000 UTC m=+148.756811469 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.939272 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-qv29k" podStartSLOduration=127.939234307 podStartE2EDuration="2m7.939234307s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:32.906038946 +0000 UTC m=+148.242328632" watchObservedRunningTime="2025-10-11 03:57:32.939234307 +0000 UTC m=+148.275523993"
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.940978 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-68b7z" podStartSLOduration=127.940970639 podStartE2EDuration="2m7.940970639s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:32.940792604 +0000 UTC m=+148.277082280" watchObservedRunningTime="2025-10-11 03:57:32.940970639 +0000 UTC m=+148.277260315"
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.949772 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7" event={"ID":"3a8f8093-7e4c-450f-8eab-72d11ae9c846","Type":"ContainerStarted","Data":"98f9580e85622bb8cb39d6c65fe9ec5443fbfc090e070adf632c97fd2c9a5841"}
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.950173 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7"
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.972039 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-db49p" event={"ID":"f196154c-0442-4600-967f-74eb36876b52","Type":"ContainerStarted","Data":"0c979137afdecf391b5af2f9467bd03ac26300b69ea03d9e4de0260e7993bade"}
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.972099 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-db49p" event={"ID":"f196154c-0442-4600-967f-74eb36876b52","Type":"ContainerStarted","Data":"cb2ec611df11c67767a0824e736133d4e0313c137a58cbe78447de7cef99d6af"}
Oct 11 03:57:32 crc kubenswrapper[4798]: I1011 03:57:32.984486 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-lplcw" event={"ID":"550d113c-cbae-420d-a970-3a266a94134c","Type":"ContainerStarted","Data":"1a5c8bda84e4113720b58e076942a8dd4feb9a0f93ed0929cde1af7d036a9a5e"}
Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.014752 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-z5h6j" podStartSLOduration=9.014730812 podStartE2EDuration="9.014730812s" podCreationTimestamp="2025-10-11 03:57:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:33.012819995 +0000 UTC m=+148.349109681" watchObservedRunningTime="2025-10-11 03:57:33.014730812 +0000 UTC m=+148.351020488"
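The startup-latency entries above are simple subtraction: podStartSLOduration is the observed running time minus podCreationTimestamp. For dns-default-z5h6j, created 03:57:24 and observed at 03:57:33.014730812, that is 9.014730812s, exactly the logged value; the operator pods created at 03:55:25 land around 2m7s-2m8s the same way. A small check of that arithmetic in Go:

package main

import (
	"fmt"
	"time"
)

func main() {
	created, _ := time.Parse(time.RFC3339Nano, "2025-10-11T03:57:24Z")
	observed, _ := time.Parse(time.RFC3339Nano, "2025-10-11T03:57:33.014730812Z")
	d := observed.Sub(created)
	// Prints: podStartSLOduration=9.014730812 podStartE2EDuration=9.014730812s
	fmt.Printf("podStartSLOduration=%v podStartE2EDuration=%v\n", d.Seconds(), d)
}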
UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:33.012819995 +0000 UTC m=+148.349109681" watchObservedRunningTime="2025-10-11 03:57:33.014730812 +0000 UTC m=+148.351020488" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.022367 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:33 crc kubenswrapper[4798]: E1011 03:57:33.025656 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.525638141 +0000 UTC m=+148.861927817 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.027668 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-g52t6" event={"ID":"7965ee7e-8332-46c1-aff0-c2d96ea976dc","Type":"ContainerStarted","Data":"0d1037bd4da92d9e71fb18893319cbb85df4f4057e9b1e6e73b755a7ac8999d1"} Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.029521 4798 patch_prober.go:28] interesting pod/downloads-7954f5f757-g2k9v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.029564 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-g2k9v" podUID="55408d92-301d-4b54-9cb1-09dc11423c33" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.030308 4798 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-dp4kr container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.030486 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" podUID="45c5e131-b2e8-4a3f-8c26-0dfe5cacee11" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.055588 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tv8br" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.075080 4798 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cs7dc" podStartSLOduration=128.075058201 podStartE2EDuration="2m8.075058201s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:33.072469102 +0000 UTC m=+148.408758788" watchObservedRunningTime="2025-10-11 03:57:33.075058201 +0000 UTC m=+148.411347887" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.096794 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-64mrh" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.125201 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:33 crc kubenswrapper[4798]: E1011 03:57:33.125820 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.6258004 +0000 UTC m=+148.962090086 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.228554 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.229946 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.231140 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.231467 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: 
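The interleaved "UnmountVolume started" and "MountVolume started" lines are the volume reconciler comparing its desired state (the new image-registry pod wants the PVC) against its actual state (the old pod still holds it) on every pass. A toy sketch of that comparison, with maps keyed by podUID/volume standing in for kubelet's desired/actual state-of-world caches (the key format and types are assumptions):

package main

import "fmt"

// reconcile emits the two operations seen throughout this log: unmount what
// is mounted but no longer desired, mount what is desired but not mounted.
func reconcile(desired, actual map[string]bool) {
	for key, mounted := range actual {
		if mounted && !desired[key] {
			fmt.Println("operationExecutor.UnmountVolume started for", key)
		}
	}
	for key := range desired {
		if !actual[key] {
			fmt.Println("operationExecutor.MountVolume started for", key)
		}
	}
}

func main() {
	pvc := "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"
	desired := map[string]bool{"3de03a8d-34d7-416d-8f1b-330ad429a3b1/" + pvc: false}
	actual := map[string]bool{"8f668bae-612b-4b75-9490-919e737c6a3b/" + pvc: true}
	// Both operations start on every pass; both keep failing until the
	// CSI driver registers, which is why the pair repeats above.
	reconcile(desired, actual)
}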
\"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.231604 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.231710 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:33 crc kubenswrapper[4798]: E1011 03:57:33.242112 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.742092815 +0000 UTC m=+149.078382501 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.247911 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.252355 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.253208 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.339604 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:33 crc kubenswrapper[4798]: E1011 03:57:33.340261 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.840239464 +0000 UTC m=+149.176529150 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.359675 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g95pt" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.377039 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7" podStartSLOduration=128.377017222 podStartE2EDuration="2m8.377017222s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:33.316802067 +0000 UTC m=+148.653091783" watchObservedRunningTime="2025-10-11 03:57:33.377017222 +0000 UTC m=+148.713306908" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.443568 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:33 crc kubenswrapper[4798]: E1011 03:57:33.444051 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:33.944035172 +0000 UTC m=+149.280324858 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.444342 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.449703 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-db49p" podStartSLOduration=128.449678392 podStartE2EDuration="2m8.449678392s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:33.378765934 +0000 UTC m=+148.715055620" watchObservedRunningTime="2025-10-11 03:57:33.449678392 +0000 UTC m=+148.785968088" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.453744 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.461196 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.469258 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 11 03:57:33 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld Oct 11 03:57:33 crc kubenswrapper[4798]: [+]process-running ok Oct 11 03:57:33 crc kubenswrapper[4798]: healthz check failed Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.469325 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.547230 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:33 crc kubenswrapper[4798]: E1011 03:57:33.547716 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:34.047696646 +0000 UTC m=+149.383986322 (durationBeforeRetry 500ms). 
Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.649324 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:33 crc kubenswrapper[4798]: E1011 03:57:33.650080 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:34.150066142 +0000 UTC m=+149.486355818 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.750597 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:33 crc kubenswrapper[4798]: E1011 03:57:33.750848 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:34.250811098 +0000 UTC m=+149.587100784 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.853315 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:33 crc kubenswrapper[4798]: E1011 03:57:33.853732 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:34.353717349 +0000 UTC m=+149.690007025 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:33 crc kubenswrapper[4798]: I1011 03:57:33.954373 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:33 crc kubenswrapper[4798]: E1011 03:57:33.955280 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:34.45526123 +0000 UTC m=+149.791550916 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.029544 4798 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-lhsx5 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.029647 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" podUID="46ccafd1-b791-4e38-8d42-13aebe2909b2" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.045899 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" event={"ID":"972678a7-921f-4581-a5d4-8fcc57387afc","Type":"ContainerStarted","Data":"b9d294fb8391e6970048969213bef98e1971337a0a24eb95875283ee92ea5060"}
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.047762 4798 patch_prober.go:28] interesting pod/downloads-7954f5f757-g2k9v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.047817 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-g2k9v" podUID="55408d92-301d-4b54-9cb1-09dc11423c33" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.061778 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:34 crc kubenswrapper[4798]: E1011 03:57:34.070289 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:34.570261566 +0000 UTC m=+149.906551462 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.152354 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l8bxj"]
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.161775 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l8bxj"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.162763 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.162855 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5df1000c-7c15-48cd-adcc-0c286e651fad-catalog-content\") pod \"community-operators-l8bxj\" (UID: \"5df1000c-7c15-48cd-adcc-0c286e651fad\") " pod="openshift-marketplace/community-operators-l8bxj"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.162885 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g48hs\" (UniqueName: \"kubernetes.io/projected/5df1000c-7c15-48cd-adcc-0c286e651fad-kube-api-access-g48hs\") pod \"community-operators-l8bxj\" (UID: \"5df1000c-7c15-48cd-adcc-0c286e651fad\") " pod="openshift-marketplace/community-operators-l8bxj"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.162948 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5df1000c-7c15-48cd-adcc-0c286e651fad-utilities\") pod \"community-operators-l8bxj\" (UID: \"5df1000c-7c15-48cd-adcc-0c286e651fad\") " pod="openshift-marketplace/community-operators-l8bxj"
Oct 11 03:57:34 crc kubenswrapper[4798]: E1011 03:57:34.163077 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:34.663061353 +0000 UTC m=+149.999351039 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.164922 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.172365 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l8bxj"]
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.277589 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5df1000c-7c15-48cd-adcc-0c286e651fad-catalog-content\") pod \"community-operators-l8bxj\" (UID: \"5df1000c-7c15-48cd-adcc-0c286e651fad\") " pod="openshift-marketplace/community-operators-l8bxj"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.277632 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g48hs\" (UniqueName: \"kubernetes.io/projected/5df1000c-7c15-48cd-adcc-0c286e651fad-kube-api-access-g48hs\") pod \"community-operators-l8bxj\" (UID: \"5df1000c-7c15-48cd-adcc-0c286e651fad\") " pod="openshift-marketplace/community-operators-l8bxj"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.277671 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.277706 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5df1000c-7c15-48cd-adcc-0c286e651fad-utilities\") pod \"community-operators-l8bxj\" (UID: \"5df1000c-7c15-48cd-adcc-0c286e651fad\") " pod="openshift-marketplace/community-operators-l8bxj"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.278310 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5df1000c-7c15-48cd-adcc-0c286e651fad-utilities\") pod \"community-operators-l8bxj\" (UID: \"5df1000c-7c15-48cd-adcc-0c286e651fad\") " pod="openshift-marketplace/community-operators-l8bxj"
Oct 11 03:57:34 crc kubenswrapper[4798]: E1011 03:57:34.279286 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:34.779238775 +0000 UTC m=+150.115528521 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
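The "SyncLoop ADD" and "SyncLoop UPDATE" entries above are the kubelet's main event loop dispatching pod changes received from the API server; a DELETE for the old controller-manager pod follows shortly. A toy dispatcher with the same shape, purely illustrative (the event types here are stand-ins, not kubelet's):

package main

import "fmt"

type podOp string

const (
	opAdd    podOp = "ADD"
	opUpdate podOp = "UPDATE"
	opDelete podOp = "DELETE"
)

type podEvent struct {
	op   podOp
	pods []string
}

// syncLoop drains pod events in order, as the kubelet's SyncLoop does.
func syncLoop(events <-chan podEvent) {
	for ev := range events {
		fmt.Printf("SyncLoop %s source=\"api\" pods=%v\n", ev.op, ev.pods)
	}
}

func main() {
	ch := make(chan podEvent, 3)
	ch <- podEvent{opAdd, []string{"openshift-marketplace/community-operators-l8bxj"}}
	ch <- podEvent{opUpdate, []string{"openshift-marketplace/community-operators-l8bxj"}}
	ch <- podEvent{opDelete, []string{"openshift-controller-manager/controller-manager-879f6c89f-4wcbm"}}
	close(ch)
	syncLoop(ch)
}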
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.284233 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5df1000c-7c15-48cd-adcc-0c286e651fad-catalog-content\") pod \"community-operators-l8bxj\" (UID: \"5df1000c-7c15-48cd-adcc-0c286e651fad\") " pod="openshift-marketplace/community-operators-l8bxj"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.311866 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7k4v2"]
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.313202 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7k4v2"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.322107 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7k4v2"]
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.325804 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.330125 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g48hs\" (UniqueName: \"kubernetes.io/projected/5df1000c-7c15-48cd-adcc-0c286e651fad-kube-api-access-g48hs\") pod \"community-operators-l8bxj\" (UID: \"5df1000c-7c15-48cd-adcc-0c286e651fad\") " pod="openshift-marketplace/community-operators-l8bxj"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.379129 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4wcbm"]
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.379676 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" podUID="d9eedcac-7bb5-4ddf-a856-387e2fb253b1" containerName="controller-manager" containerID="cri-o://f85079db10fc845d82c98835a1b7dc1ff421eb8e6e7f3522aea0c54237713707" gracePeriod=30
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.380039 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.380172 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08878c03-1346-41ac-9f4f-874cd48e2129-catalog-content\") pod \"certified-operators-7k4v2\" (UID: \"08878c03-1346-41ac-9f4f-874cd48e2129\") " pod="openshift-marketplace/certified-operators-7k4v2"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.380205 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08878c03-1346-41ac-9f4f-874cd48e2129-utilities\") pod \"certified-operators-7k4v2\" (UID: \"08878c03-1346-41ac-9f4f-874cd48e2129\") " pod="openshift-marketplace/certified-operators-7k4v2"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.380230 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snwss\" (UniqueName: \"kubernetes.io/projected/08878c03-1346-41ac-9f4f-874cd48e2129-kube-api-access-snwss\") pod \"certified-operators-7k4v2\" (UID: \"08878c03-1346-41ac-9f4f-874cd48e2129\") " pod="openshift-marketplace/certified-operators-7k4v2"
Oct 11 03:57:34 crc kubenswrapper[4798]: E1011 03:57:34.380273 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:34.880253639 +0000 UTC m=+150.216543325 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.380369 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:34 crc kubenswrapper[4798]: E1011 03:57:34.380693 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:34.880686273 +0000 UTC m=+150.216975959 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.459464 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 03:57:34 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld
Oct 11 03:57:34 crc kubenswrapper[4798]: [+]process-running ok
Oct 11 03:57:34 crc kubenswrapper[4798]: healthz check failed
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.459546 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.484133 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.484540 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snwss\" (UniqueName: \"kubernetes.io/projected/08878c03-1346-41ac-9f4f-874cd48e2129-kube-api-access-snwss\") pod \"certified-operators-7k4v2\" (UID: \"08878c03-1346-41ac-9f4f-874cd48e2129\") " pod="openshift-marketplace/certified-operators-7k4v2"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.484688 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08878c03-1346-41ac-9f4f-874cd48e2129-catalog-content\") pod \"certified-operators-7k4v2\" (UID: \"08878c03-1346-41ac-9f4f-874cd48e2129\") " pod="openshift-marketplace/certified-operators-7k4v2"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.488767 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08878c03-1346-41ac-9f4f-874cd48e2129-utilities\") pod \"certified-operators-7k4v2\" (UID: \"08878c03-1346-41ac-9f4f-874cd48e2129\") " pod="openshift-marketplace/certified-operators-7k4v2"
Oct 11 03:57:34 crc kubenswrapper[4798]: E1011 03:57:34.495030 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:34.994999908 +0000 UTC m=+150.331289594 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.496024 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08878c03-1346-41ac-9f4f-874cd48e2129-catalog-content\") pod \"certified-operators-7k4v2\" (UID: \"08878c03-1346-41ac-9f4f-874cd48e2129\") " pod="openshift-marketplace/certified-operators-7k4v2"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.501626 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08878c03-1346-41ac-9f4f-874cd48e2129-utilities\") pod \"certified-operators-7k4v2\" (UID: \"08878c03-1346-41ac-9f4f-874cd48e2129\") " pod="openshift-marketplace/certified-operators-7k4v2"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.515888 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x4ngf"]
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.517210 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l8bxj"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.518200 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x4ngf"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.525342 4798 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.544793 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x4ngf"]
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.552634 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snwss\" (UniqueName: \"kubernetes.io/projected/08878c03-1346-41ac-9f4f-874cd48e2129-kube-api-access-snwss\") pod \"certified-operators-7k4v2\" (UID: \"08878c03-1346-41ac-9f4f-874cd48e2129\") " pod="openshift-marketplace/certified-operators-7k4v2"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.598257 4798 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-10-11T03:57:34.526594271Z","Handler":null,"Name":""}
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.610541 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:34 crc kubenswrapper[4798]: E1011 03:57:34.611054 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:35.111038205 +0000 UTC m=+150.447327891 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
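The plugin_watcher and RegisterPlugin entries above are the turning point of this whole sequence: the hostpath provisioner's registration socket has appeared under /var/lib/kubelet/plugins_registry, so the driver lookups that have been failing since the top of this log can start succeeding once registration completes. The kubelet watches that directory with inotify; the stdlib-only sketch below polls instead, an assumption made to avoid external dependencies:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"
)

// watchSockets reports registration sockets not seen before, mimicking the
// "Adding socket path ... to desired state cache" step in plugin_watcher.go.
func watchSockets(dir string, seen map[string]bool) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return // directory only exists on a kubelet host
	}
	for _, e := range entries {
		name := e.Name()
		if strings.HasSuffix(name, "-reg.sock") && !seen[name] {
			seen[name] = true
			fmt.Println("Adding socket path to desired state cache:", filepath.Join(dir, name))
		}
	}
}

func main() {
	seen := map[string]bool{}
	for i := 0; i < 3; i++ {
		watchSockets("/var/lib/kubelet/plugins_registry", seen)
		time.Sleep(100 * time.Millisecond)
	}
}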
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:35.111038205 +0000 UTC m=+150.447327891 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.699057 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-n8zfl"] Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.701213 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.712024 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:34 crc kubenswrapper[4798]: E1011 03:57:34.712270 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 03:57:35.212239515 +0000 UTC m=+150.548529201 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.713434 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72rr6\" (UniqueName: \"kubernetes.io/projected/07406786-26c9-42e3-9602-64c9cbe7f235-kube-api-access-72rr6\") pod \"community-operators-x4ngf\" (UID: \"07406786-26c9-42e3-9602-64c9cbe7f235\") " pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.713496 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07406786-26c9-42e3-9602-64c9cbe7f235-catalog-content\") pod \"community-operators-x4ngf\" (UID: \"07406786-26c9-42e3-9602-64c9cbe7f235\") " pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.713530 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07406786-26c9-42e3-9602-64c9cbe7f235-utilities\") pod \"community-operators-x4ngf\" (UID: \"07406786-26c9-42e3-9602-64c9cbe7f235\") " pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.713673 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:34 crc kubenswrapper[4798]: E1011 03:57:34.714022 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 03:57:35.214008549 +0000 UTC m=+150.550298235 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bdw6s" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.729528 4798 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.729889 4798 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.733913 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n8zfl"] Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.815447 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.815934 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b37ceeac-8a39-4c11-8e4a-51047efe0da4-utilities\") pod \"certified-operators-n8zfl\" (UID: \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\") " pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.815996 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72rr6\" (UniqueName: \"kubernetes.io/projected/07406786-26c9-42e3-9602-64c9cbe7f235-kube-api-access-72rr6\") pod \"community-operators-x4ngf\" (UID: \"07406786-26c9-42e3-9602-64c9cbe7f235\") " pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.816017 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07406786-26c9-42e3-9602-64c9cbe7f235-catalog-content\") pod \"community-operators-x4ngf\" (UID: \"07406786-26c9-42e3-9602-64c9cbe7f235\") " pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.816058 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlq27\" (UniqueName: \"kubernetes.io/projected/b37ceeac-8a39-4c11-8e4a-51047efe0da4-kube-api-access-wlq27\") pod \"certified-operators-n8zfl\" (UID: \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\") " pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.816084 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07406786-26c9-42e3-9602-64c9cbe7f235-utilities\") pod \"community-operators-x4ngf\" (UID: \"07406786-26c9-42e3-9602-64c9cbe7f235\") " pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:57:34 
crc kubenswrapper[4798]: I1011 03:57:34.816136 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b37ceeac-8a39-4c11-8e4a-51047efe0da4-catalog-content\") pod \"certified-operators-n8zfl\" (UID: \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\") " pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.817907 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07406786-26c9-42e3-9602-64c9cbe7f235-catalog-content\") pod \"community-operators-x4ngf\" (UID: \"07406786-26c9-42e3-9602-64c9cbe7f235\") " pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.818267 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07406786-26c9-42e3-9602-64c9cbe7f235-utilities\") pod \"community-operators-x4ngf\" (UID: \"07406786-26c9-42e3-9602-64c9cbe7f235\") " pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.823174 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.834764 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7k4v2" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.902922 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72rr6\" (UniqueName: \"kubernetes.io/projected/07406786-26c9-42e3-9602-64c9cbe7f235-kube-api-access-72rr6\") pod \"community-operators-x4ngf\" (UID: \"07406786-26c9-42e3-9602-64c9cbe7f235\") " pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.903709 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.918002 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.918081 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b37ceeac-8a39-4c11-8e4a-51047efe0da4-utilities\") pod \"certified-operators-n8zfl\" (UID: \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\") " pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.918112 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlq27\" (UniqueName: \"kubernetes.io/projected/b37ceeac-8a39-4c11-8e4a-51047efe0da4-kube-api-access-wlq27\") pod \"certified-operators-n8zfl\" (UID: \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\") " pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.918151 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b37ceeac-8a39-4c11-8e4a-51047efe0da4-catalog-content\") pod \"certified-operators-n8zfl\" (UID: \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\") " pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.918768 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b37ceeac-8a39-4c11-8e4a-51047efe0da4-catalog-content\") pod \"certified-operators-n8zfl\" (UID: \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\") " pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.923824 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b37ceeac-8a39-4c11-8e4a-51047efe0da4-utilities\") pod \"certified-operators-n8zfl\" (UID: \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\") " pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.952836 4798 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.953217 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:34 crc kubenswrapper[4798]: I1011 03:57:34.953108 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlq27\" (UniqueName: \"kubernetes.io/projected/b37ceeac-8a39-4c11-8e4a-51047efe0da4-kube-api-access-wlq27\") pod \"certified-operators-n8zfl\" (UID: \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\") " pod="openshift-marketplace/certified-operators-n8zfl"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.018313 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bdw6s\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.024794 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n8zfl"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.102899 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"16cbc05ed0f98e397bb1b9c1f18015ef8eda4c71a7f4d1b3be64ccc8943fc1ae"}
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.102965 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"ed67a2c59b584d2a660ad5464314b5ef0324baefda9f5651753fa2fb59b8a8a1"}
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.120200 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.125532 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"33fddc75a2d7b9b23903e50aeb8bc8551d99be78634fc64d8d1376b13a4fbcb1"}
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.125596 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"353b43fda4bda8e18a4629ed97ebee9c65b9bdc24dc17f5ed7ae5129b967c2d8"}
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.149798 4798 generic.go:334] "Generic (PLEG): container finished" podID="d9eedcac-7bb5-4ddf-a856-387e2fb253b1" containerID="f85079db10fc845d82c98835a1b7dc1ff421eb8e6e7f3522aea0c54237713707" exitCode=0
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.149921 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" event={"ID":"d9eedcac-7bb5-4ddf-a856-387e2fb253b1","Type":"ContainerDied","Data":"f85079db10fc845d82c98835a1b7dc1ff421eb8e6e7f3522aea0c54237713707"}
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.149968 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm" event={"ID":"d9eedcac-7bb5-4ddf-a856-387e2fb253b1","Type":"ContainerDied","Data":"a0fedc651405a69ca5e1f6bca83f6541ae0c440d0bf5bf742615294613b606c5"}
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.149988 4798 scope.go:117] "RemoveContainer" containerID="f85079db10fc845d82c98835a1b7dc1ff421eb8e6e7f3522aea0c54237713707"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.150143 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4wcbm"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.166930 4798 generic.go:334] "Generic (PLEG): container finished" podID="e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd" containerID="17bf07656456c1c694e9e48b11a6ddcdbe0f2b027a8abf029615aff20323b4bd" exitCode=0
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.167020 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" event={"ID":"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd","Type":"ContainerDied","Data":"17bf07656456c1c694e9e48b11a6ddcdbe0f2b027a8abf029615aff20323b4bd"}
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.201106 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"a990c215c0ed7897a7b5cdaf8bda2d908ef8012c4530e331aeb20f2e872d506f"}
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.201609 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"97d7512eb15866a47c509c1540b34a417ec41b966a9b2da34b48cd7cfb7a7d13"}
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.202658 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.227444 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" event={"ID":"972678a7-921f-4581-a5d4-8fcc57387afc","Type":"ContainerStarted","Data":"0b635634b663f7267e023db58aa84f996c48814cf362265754389f7d5c88edef"}
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.229488 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frdtx\" (UniqueName: \"kubernetes.io/projected/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-kube-api-access-frdtx\") pod \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") "
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.229550 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-client-ca\") pod \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") "
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.229598 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-proxy-ca-bundles\") pod \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") "
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.229654 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-serving-cert\") pod \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") "
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.229674 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-config\") pod \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\" (UID: \"d9eedcac-7bb5-4ddf-a856-387e2fb253b1\") "
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.231614 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-config" (OuterVolumeSpecName: "config") pod "d9eedcac-7bb5-4ddf-a856-387e2fb253b1" (UID: "d9eedcac-7bb5-4ddf-a856-387e2fb253b1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.232162 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-client-ca" (OuterVolumeSpecName: "client-ca") pod "d9eedcac-7bb5-4ddf-a856-387e2fb253b1" (UID: "d9eedcac-7bb5-4ddf-a856-387e2fb253b1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.232215 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "d9eedcac-7bb5-4ddf-a856-387e2fb253b1" (UID: "d9eedcac-7bb5-4ddf-a856-387e2fb253b1"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.257432 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d9eedcac-7bb5-4ddf-a856-387e2fb253b1" (UID: "d9eedcac-7bb5-4ddf-a856-387e2fb253b1"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.258082 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-kube-api-access-frdtx" (OuterVolumeSpecName: "kube-api-access-frdtx") pod "d9eedcac-7bb5-4ddf-a856-387e2fb253b1" (UID: "d9eedcac-7bb5-4ddf-a856-387e2fb253b1"). InnerVolumeSpecName "kube-api-access-frdtx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.267522 4798 scope.go:117] "RemoveContainer" containerID="f85079db10fc845d82c98835a1b7dc1ff421eb8e6e7f3522aea0c54237713707"
Oct 11 03:57:35 crc kubenswrapper[4798]: E1011 03:57:35.268019 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f85079db10fc845d82c98835a1b7dc1ff421eb8e6e7f3522aea0c54237713707\": container with ID starting with f85079db10fc845d82c98835a1b7dc1ff421eb8e6e7f3522aea0c54237713707 not found: ID does not exist" containerID="f85079db10fc845d82c98835a1b7dc1ff421eb8e6e7f3522aea0c54237713707"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.268081 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f85079db10fc845d82c98835a1b7dc1ff421eb8e6e7f3522aea0c54237713707"} err="failed to get container status \"f85079db10fc845d82c98835a1b7dc1ff421eb8e6e7f3522aea0c54237713707\": rpc error: code = NotFound desc = could not find container \"f85079db10fc845d82c98835a1b7dc1ff421eb8e6e7f3522aea0c54237713707\": container with ID starting with f85079db10fc845d82c98835a1b7dc1ff421eb8e6e7f3522aea0c54237713707 not found: ID does not exist"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.273873 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.333903 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.333933 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-config\") on node \"crc\" DevicePath \"\""
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.333945 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frdtx\" (UniqueName: \"kubernetes.io/projected/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-kube-api-access-frdtx\") on node \"crc\" DevicePath \"\""
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.334051 4798 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-client-ca\") on node \"crc\" DevicePath \"\""
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.334062 4798 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d9eedcac-7bb5-4ddf-a856-387e2fb253b1-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.349194 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-dxlvp" podStartSLOduration=11.349154862 podStartE2EDuration="11.349154862s" podCreationTimestamp="2025-10-11 03:57:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:35.257347286 +0000 UTC m=+150.593636972" watchObservedRunningTime="2025-10-11 03:57:35.349154862 +0000 UTC m=+150.685444548"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.354344 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l8bxj"]
Oct 11 03:57:35 crc kubenswrapper[4798]: W1011 03:57:35.371171 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5df1000c_7c15_48cd_adcc_0c286e651fad.slice/crio-06ae338d2c522e05ba2fad27617ea4f127717d6fb7991c766dea113c518107d6 WatchSource:0}: Error finding container 06ae338d2c522e05ba2fad27617ea4f127717d6fb7991c766dea113c518107d6: Status 404 returned error can't find the container with id 06ae338d2c522e05ba2fad27617ea4f127717d6fb7991c766dea113c518107d6
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.464090 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.470599 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x4ngf"]
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.473536 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 03:57:35 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld
Oct 11 03:57:35 crc kubenswrapper[4798]: [+]process-running ok
Oct 11 03:57:35 crc kubenswrapper[4798]: healthz check failed
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.473597 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.589519 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4wcbm"]
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.601250 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4wcbm"]
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.609372 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7k4v2"]
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.619706 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Oct 11 03:57:35 crc kubenswrapper[4798]: E1011 03:57:35.620348 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9eedcac-7bb5-4ddf-a856-387e2fb253b1" containerName="controller-manager"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.620363 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9eedcac-7bb5-4ddf-a856-387e2fb253b1" containerName="controller-manager"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.620952 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9eedcac-7bb5-4ddf-a856-387e2fb253b1" containerName="controller-manager"
Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.628840 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.633073 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.643171 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.662268 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.666416 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n8zfl"] Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.709266 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bdw6s"] Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.744803 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b2528278-dc60-43fd-8f3c-92182689a8d8-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b2528278-dc60-43fd-8f3c-92182689a8d8\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.744864 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b2528278-dc60-43fd-8f3c-92182689a8d8-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b2528278-dc60-43fd-8f3c-92182689a8d8\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.780304 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lhsx5" Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.850338 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b2528278-dc60-43fd-8f3c-92182689a8d8-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b2528278-dc60-43fd-8f3c-92182689a8d8\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.850507 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b2528278-dc60-43fd-8f3c-92182689a8d8-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b2528278-dc60-43fd-8f3c-92182689a8d8\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.850943 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b2528278-dc60-43fd-8f3c-92182689a8d8-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b2528278-dc60-43fd-8f3c-92182689a8d8\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 03:57:35 crc kubenswrapper[4798]: I1011 03:57:35.875544 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b2528278-dc60-43fd-8f3c-92182689a8d8-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b2528278-dc60-43fd-8f3c-92182689a8d8\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 03:57:36 crc 
kubenswrapper[4798]: I1011 03:57:36.004794 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.234006 4798 generic.go:334] "Generic (PLEG): container finished" podID="b37ceeac-8a39-4c11-8e4a-51047efe0da4" containerID="e9a0e48e65a5ffb8d4e9fcd7eedaa1687ad26db4e1d0ed0ae59c27722254bc14" exitCode=0 Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.234113 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8zfl" event={"ID":"b37ceeac-8a39-4c11-8e4a-51047efe0da4","Type":"ContainerDied","Data":"e9a0e48e65a5ffb8d4e9fcd7eedaa1687ad26db4e1d0ed0ae59c27722254bc14"} Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.234214 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8zfl" event={"ID":"b37ceeac-8a39-4c11-8e4a-51047efe0da4","Type":"ContainerStarted","Data":"dbf2289716cfd47493ff89b4b3ad329e13f380bcb773a11282c5e8524377ef80"} Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.236724 4798 generic.go:334] "Generic (PLEG): container finished" podID="08878c03-1346-41ac-9f4f-874cd48e2129" containerID="2fe7a29c2983a8f378bdc87f3776b44298bd4a548068a9878532e12ae8e3adfc" exitCode=0 Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.236750 4798 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.236840 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7k4v2" event={"ID":"08878c03-1346-41ac-9f4f-874cd48e2129","Type":"ContainerDied","Data":"2fe7a29c2983a8f378bdc87f3776b44298bd4a548068a9878532e12ae8e3adfc"} Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.236920 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7k4v2" event={"ID":"08878c03-1346-41ac-9f4f-874cd48e2129","Type":"ContainerStarted","Data":"92d6f1d2e21085802dce08dcf8f98a1f1dad8105b6c02fce152f1da1bf4ba86a"} Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.239893 4798 generic.go:334] "Generic (PLEG): container finished" podID="07406786-26c9-42e3-9602-64c9cbe7f235" containerID="0b02982d8ced9097ed73ed023860a43bbe8f3f1df65a492b54b65ed3a5675a49" exitCode=0 Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.239932 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x4ngf" event={"ID":"07406786-26c9-42e3-9602-64c9cbe7f235","Type":"ContainerDied","Data":"0b02982d8ced9097ed73ed023860a43bbe8f3f1df65a492b54b65ed3a5675a49"} Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.239968 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x4ngf" event={"ID":"07406786-26c9-42e3-9602-64c9cbe7f235","Type":"ContainerStarted","Data":"4a897bb8be4c0e300a0f9ad9de25f02ada482b1c364f314bc868d30d11f74d10"} Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.243334 4798 generic.go:334] "Generic (PLEG): container finished" podID="5df1000c-7c15-48cd-adcc-0c286e651fad" containerID="2fd2e0d05737597dd13740074cdf4f1f772374111cc3ba002d863c680066a67f" exitCode=0 Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.243436 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l8bxj" 
event={"ID":"5df1000c-7c15-48cd-adcc-0c286e651fad","Type":"ContainerDied","Data":"2fd2e0d05737597dd13740074cdf4f1f772374111cc3ba002d863c680066a67f"} Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.243497 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l8bxj" event={"ID":"5df1000c-7c15-48cd-adcc-0c286e651fad","Type":"ContainerStarted","Data":"06ae338d2c522e05ba2fad27617ea4f127717d6fb7991c766dea113c518107d6"} Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.250271 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" event={"ID":"3de03a8d-34d7-416d-8f1b-330ad429a3b1","Type":"ContainerStarted","Data":"154bffdf90ea7d36f6c03e255c3a7da7395a5d57d71ae8bf7a971d53a53b316a"} Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.250324 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" event={"ID":"3de03a8d-34d7-416d-8f1b-330ad429a3b1","Type":"ContainerStarted","Data":"355f05c9487ae5e6f1655c94f81d446993a80c8dba5ac59e1d8437d356f79db4"} Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.274243 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.279018 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mvv66"] Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.280432 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.288891 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.301587 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mvv66"] Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.334247 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" podStartSLOduration=131.334226963 podStartE2EDuration="2m11.334226963s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:36.330014196 +0000 UTC m=+151.666303882" watchObservedRunningTime="2025-10-11 03:57:36.334226963 +0000 UTC m=+151.670516649" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.342008 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5bvx8"] Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.342868 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.358367 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.359139 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.359647 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.360170 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.360300 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.360306 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.370574 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5bvx8"] Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.370920 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrgv5\" (UniqueName: \"kubernetes.io/projected/de740119-5acc-48d0-aaa4-0a64e054cac8-kube-api-access-nrgv5\") pod \"redhat-marketplace-mvv66\" (UID: \"de740119-5acc-48d0-aaa4-0a64e054cac8\") " pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.370991 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de740119-5acc-48d0-aaa4-0a64e054cac8-catalog-content\") pod \"redhat-marketplace-mvv66\" (UID: \"de740119-5acc-48d0-aaa4-0a64e054cac8\") " pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.371049 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de740119-5acc-48d0-aaa4-0a64e054cac8-utilities\") pod \"redhat-marketplace-mvv66\" (UID: \"de740119-5acc-48d0-aaa4-0a64e054cac8\") " pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.380049 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.460638 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 11 03:57:36 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld Oct 11 03:57:36 crc kubenswrapper[4798]: [+]process-running ok Oct 11 03:57:36 crc kubenswrapper[4798]: healthz check failed Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.460716 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" 
podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.490927 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.490987 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b1d91a6-9b59-466c-a922-22825aac279b-serving-cert\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.491057 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrgv5\" (UniqueName: \"kubernetes.io/projected/de740119-5acc-48d0-aaa4-0a64e054cac8-kube-api-access-nrgv5\") pod \"redhat-marketplace-mvv66\" (UID: \"de740119-5acc-48d0-aaa4-0a64e054cac8\") " pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.491084 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de740119-5acc-48d0-aaa4-0a64e054cac8-catalog-content\") pod \"redhat-marketplace-mvv66\" (UID: \"de740119-5acc-48d0-aaa4-0a64e054cac8\") " pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.491117 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de740119-5acc-48d0-aaa4-0a64e054cac8-utilities\") pod \"redhat-marketplace-mvv66\" (UID: \"de740119-5acc-48d0-aaa4-0a64e054cac8\") " pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.491147 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-config\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.491210 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xp9b6\" (UniqueName: \"kubernetes.io/projected/7b1d91a6-9b59-466c-a922-22825aac279b-kube-api-access-xp9b6\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.491244 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-client-ca\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 
03:57:36.492607 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de740119-5acc-48d0-aaa4-0a64e054cac8-utilities\") pod \"redhat-marketplace-mvv66\" (UID: \"de740119-5acc-48d0-aaa4-0a64e054cac8\") " pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.492653 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de740119-5acc-48d0-aaa4-0a64e054cac8-catalog-content\") pod \"redhat-marketplace-mvv66\" (UID: \"de740119-5acc-48d0-aaa4-0a64e054cac8\") " pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.514631 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.517513 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.525859 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.525983 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.526784 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.529742 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrgv5\" (UniqueName: \"kubernetes.io/projected/de740119-5acc-48d0-aaa4-0a64e054cac8-kube-api-access-nrgv5\") pod \"redhat-marketplace-mvv66\" (UID: \"de740119-5acc-48d0-aaa4-0a64e054cac8\") " pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.557142 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.592107 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.592164 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b1d91a6-9b59-466c-a922-22825aac279b-serving-cert\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.592249 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-config\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.592286 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/23f9fc02-cc93-408a-89a7-3698f24744dd-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"23f9fc02-cc93-408a-89a7-3698f24744dd\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.592336 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/23f9fc02-cc93-408a-89a7-3698f24744dd-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"23f9fc02-cc93-408a-89a7-3698f24744dd\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.592360 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xp9b6\" (UniqueName: \"kubernetes.io/projected/7b1d91a6-9b59-466c-a922-22825aac279b-kube-api-access-xp9b6\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.592410 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-client-ca\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.593338 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-client-ca\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.594411 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.596552 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-config\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.601689 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b1d91a6-9b59-466c-a922-22825aac279b-serving-cert\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.612587 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.614106 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xp9b6\" (UniqueName: \"kubernetes.io/projected/7b1d91a6-9b59-466c-a922-22825aac279b-kube-api-access-xp9b6\") pod \"controller-manager-879f6c89f-5bvx8\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.661667 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.661722 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.663298 4798 patch_prober.go:28] interesting pod/console-f9d7485db-5lpj9 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.663358 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-5lpj9" podUID="79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.684129 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wz94f"] Oct 11 03:57:36 crc kubenswrapper[4798]: E1011 03:57:36.684506 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd" containerName="collect-profiles" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.684534 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd" containerName="collect-profiles" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.684654 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd" containerName="collect-profiles" Oct 11 03:57:36 crc 
kubenswrapper[4798]: I1011 03:57:36.685771 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.689698 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.694737 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-config-volume\") pod \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\" (UID: \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\") " Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.694862 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9d7x\" (UniqueName: \"kubernetes.io/projected/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-kube-api-access-m9d7x\") pod \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\" (UID: \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\") " Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.695036 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-secret-volume\") pod \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\" (UID: \"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd\") " Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.695294 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/23f9fc02-cc93-408a-89a7-3698f24744dd-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"23f9fc02-cc93-408a-89a7-3698f24744dd\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.695360 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/23f9fc02-cc93-408a-89a7-3698f24744dd-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"23f9fc02-cc93-408a-89a7-3698f24744dd\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.695514 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/23f9fc02-cc93-408a-89a7-3698f24744dd-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"23f9fc02-cc93-408a-89a7-3698f24744dd\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.696702 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-config-volume" (OuterVolumeSpecName: "config-volume") pod "e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd" (UID: "e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.698662 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wz94f"] Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.701098 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd" (UID: "e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.707900 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-kube-api-access-m9d7x" (OuterVolumeSpecName: "kube-api-access-m9d7x") pod "e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd" (UID: "e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd"). InnerVolumeSpecName "kube-api-access-m9d7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.727076 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/23f9fc02-cc93-408a-89a7-3698f24744dd-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"23f9fc02-cc93-408a-89a7-3698f24744dd\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.806858 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45b7815-8c30-409d-96d9-c387b09c7d6b-catalog-content\") pod \"redhat-marketplace-wz94f\" (UID: \"a45b7815-8c30-409d-96d9-c387b09c7d6b\") " pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.807374 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45b7815-8c30-409d-96d9-c387b09c7d6b-utilities\") pod \"redhat-marketplace-wz94f\" (UID: \"a45b7815-8c30-409d-96d9-c387b09c7d6b\") " pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.807493 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9455d\" (UniqueName: \"kubernetes.io/projected/a45b7815-8c30-409d-96d9-c387b09c7d6b-kube-api-access-9455d\") pod \"redhat-marketplace-wz94f\" (UID: \"a45b7815-8c30-409d-96d9-c387b09c7d6b\") " pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.807555 4798 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.807568 4798 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.807578 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9d7x\" (UniqueName: \"kubernetes.io/projected/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd-kube-api-access-m9d7x\") on node \"crc\" 
DevicePath \"\"" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.846464 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.846535 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.846768 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.867180 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-db49p" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.909990 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45b7815-8c30-409d-96d9-c387b09c7d6b-catalog-content\") pod \"redhat-marketplace-wz94f\" (UID: \"a45b7815-8c30-409d-96d9-c387b09c7d6b\") " pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.910079 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45b7815-8c30-409d-96d9-c387b09c7d6b-utilities\") pod \"redhat-marketplace-wz94f\" (UID: \"a45b7815-8c30-409d-96d9-c387b09c7d6b\") " pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.910132 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9455d\" (UniqueName: \"kubernetes.io/projected/a45b7815-8c30-409d-96d9-c387b09c7d6b-kube-api-access-9455d\") pod \"redhat-marketplace-wz94f\" (UID: \"a45b7815-8c30-409d-96d9-c387b09c7d6b\") " pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.911003 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45b7815-8c30-409d-96d9-c387b09c7d6b-catalog-content\") pod \"redhat-marketplace-wz94f\" (UID: \"a45b7815-8c30-409d-96d9-c387b09c7d6b\") " pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.911069 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45b7815-8c30-409d-96d9-c387b09c7d6b-utilities\") pod \"redhat-marketplace-wz94f\" (UID: \"a45b7815-8c30-409d-96d9-c387b09c7d6b\") " pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.952530 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9455d\" (UniqueName: \"kubernetes.io/projected/a45b7815-8c30-409d-96d9-c387b09c7d6b-kube-api-access-9455d\") pod \"redhat-marketplace-wz94f\" (UID: \"a45b7815-8c30-409d-96d9-c387b09c7d6b\") " pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:57:36 crc kubenswrapper[4798]: I1011 03:57:36.957126 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mvv66"] Oct 11 03:57:36 crc kubenswrapper[4798]: W1011 03:57:36.970583 4798 manager.go:1169] Failed to process watch event {EventType:0 
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.016938 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wz94f"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.190347 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Oct 11 03:57:37 crc kubenswrapper[4798]: W1011 03:57:37.218535 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod23f9fc02_cc93_408a_89a7_3698f24744dd.slice/crio-125c1c2c260c23c35d1da8493f2f4b14e8f332bd91df9ce147f37f3635f7b3c4 WatchSource:0}: Error finding container 125c1c2c260c23c35d1da8493f2f4b14e8f332bd91df9ce147f37f3635f7b3c4: Status 404 returned error can't find the container with id 125c1c2c260c23c35d1da8493f2f4b14e8f332bd91df9ce147f37f3635f7b3c4
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.294245 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvv66" event={"ID":"de740119-5acc-48d0-aaa4-0a64e054cac8","Type":"ContainerStarted","Data":"73a656b9737f12fccc32e8e0238c9d5f5a29a6bb03abf7bc1d249a34631be68a"}
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.294708 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvv66" event={"ID":"de740119-5acc-48d0-aaa4-0a64e054cac8","Type":"ContainerStarted","Data":"924ddd51156a9217e5427ac3f67f2e247a4f862fd6a55ad5cf120235db2e1d20"}
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.304719 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mlj2w"]
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.306470 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mlj2w"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.312218 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.319465 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5bvx8"]
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.322255 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mlj2w"]
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.326740 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx" event={"ID":"e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd","Type":"ContainerDied","Data":"640248fd85837c6ebee9921630a6236f6d80d14758eba9da4879f8d0cd1e2835"}
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.326797 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="640248fd85837c6ebee9921630a6236f6d80d14758eba9da4879f8d0cd1e2835"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.326766 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.344203 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b2528278-dc60-43fd-8f3c-92182689a8d8","Type":"ContainerStarted","Data":"6030b38da46e27e469d83b63c399cca90866601f8ad2909bdcbfb1944bd75de9"}
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.344557 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b2528278-dc60-43fd-8f3c-92182689a8d8","Type":"ContainerStarted","Data":"20a5880c1815f4b514fc30778bb32ea0a910706ff78994089e9cc3ebd51805f3"}
Oct 11 03:57:37 crc kubenswrapper[4798]: W1011 03:57:37.344676 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7b1d91a6_9b59_466c_a922_22825aac279b.slice/crio-fc2062458781ad220c015eee1187cd4892879287b02f7d33d96de621b30afb9c WatchSource:0}: Error finding container fc2062458781ad220c015eee1187cd4892879287b02f7d33d96de621b30afb9c: Status 404 returned error can't find the container with id fc2062458781ad220c015eee1187cd4892879287b02f7d33d96de621b30afb9c
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.352520 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"23f9fc02-cc93-408a-89a7-3698f24744dd","Type":"ContainerStarted","Data":"125c1c2c260c23c35d1da8493f2f4b14e8f332bd91df9ce147f37f3635f7b3c4"}
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.373265 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.401451 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-db49p"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.401721 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wz94f"]
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.414627 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.414601505 podStartE2EDuration="2.414601505s" podCreationTimestamp="2025-10-11 03:57:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:37.405035358 +0000 UTC m=+152.741325044" watchObservedRunningTime="2025-10-11 03:57:37.414601505 +0000 UTC m=+152.750891191"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.420653 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2g9n\" (UniqueName: \"kubernetes.io/projected/c27e187f-5d34-4455-9a63-8e3f213139bc-kube-api-access-c2g9n\") pod \"redhat-operators-mlj2w\" (UID: \"c27e187f-5d34-4455-9a63-8e3f213139bc\") " pod="openshift-marketplace/redhat-operators-mlj2w"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.421029 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c27e187f-5d34-4455-9a63-8e3f213139bc-utilities\") pod \"redhat-operators-mlj2w\" (UID: \"c27e187f-5d34-4455-9a63-8e3f213139bc\") " pod="openshift-marketplace/redhat-operators-mlj2w"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.421164 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c27e187f-5d34-4455-9a63-8e3f213139bc-catalog-content\") pod \"redhat-operators-mlj2w\" (UID: \"c27e187f-5d34-4455-9a63-8e3f213139bc\") " pod="openshift-marketplace/redhat-operators-mlj2w"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.469693 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 03:57:37 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld
Oct 11 03:57:37 crc kubenswrapper[4798]: [+]process-running ok
Oct 11 03:57:37 crc kubenswrapper[4798]: healthz check failed
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.469765 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.494038 4798 patch_prober.go:28] interesting pod/downloads-7954f5f757-g2k9v container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.494110 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-g2k9v" podUID="55408d92-301d-4b54-9cb1-09dc11423c33" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.494255 4798 patch_prober.go:28] interesting pod/downloads-7954f5f757-g2k9v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.494269 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-g2k9v" podUID="55408d92-301d-4b54-9cb1-09dc11423c33" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.522674 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2g9n\" (UniqueName: \"kubernetes.io/projected/c27e187f-5d34-4455-9a63-8e3f213139bc-kube-api-access-c2g9n\") pod \"redhat-operators-mlj2w\" (UID: \"c27e187f-5d34-4455-9a63-8e3f213139bc\") " pod="openshift-marketplace/redhat-operators-mlj2w"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.522752 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c27e187f-5d34-4455-9a63-8e3f213139bc-utilities\") pod \"redhat-operators-mlj2w\" (UID: \"c27e187f-5d34-4455-9a63-8e3f213139bc\") " pod="openshift-marketplace/redhat-operators-mlj2w"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.522788 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c27e187f-5d34-4455-9a63-8e3f213139bc-catalog-content\") pod \"redhat-operators-mlj2w\" (UID: \"c27e187f-5d34-4455-9a63-8e3f213139bc\") " pod="openshift-marketplace/redhat-operators-mlj2w"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.523372 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c27e187f-5d34-4455-9a63-8e3f213139bc-catalog-content\") pod \"redhat-operators-mlj2w\" (UID: \"c27e187f-5d34-4455-9a63-8e3f213139bc\") " pod="openshift-marketplace/redhat-operators-mlj2w"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.524073 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c27e187f-5d34-4455-9a63-8e3f213139bc-utilities\") pod \"redhat-operators-mlj2w\" (UID: \"c27e187f-5d34-4455-9a63-8e3f213139bc\") " pod="openshift-marketplace/redhat-operators-mlj2w"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.543432 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9eedcac-7bb5-4ddf-a856-387e2fb253b1" path="/var/lib/kubelet/pods/d9eedcac-7bb5-4ddf-a856-387e2fb253b1/volumes"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.544167 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-kgrgq"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.571187 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2g9n\" (UniqueName: \"kubernetes.io/projected/c27e187f-5d34-4455-9a63-8e3f213139bc-kube-api-access-c2g9n\") pod \"redhat-operators-mlj2w\" (UID: \"c27e187f-5d34-4455-9a63-8e3f213139bc\") " pod="openshift-marketplace/redhat-operators-mlj2w"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.643727 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.671183 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mlj2w"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.699845 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xxb82"]
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.701137 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xxb82"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.716591 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xxb82"]
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.733002 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/036b2b20-470d-4ea9-a0ef-10927444dfda-utilities\") pod \"redhat-operators-xxb82\" (UID: \"036b2b20-470d-4ea9-a0ef-10927444dfda\") " pod="openshift-marketplace/redhat-operators-xxb82"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.733060 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhjws\" (UniqueName: \"kubernetes.io/projected/036b2b20-470d-4ea9-a0ef-10927444dfda-kube-api-access-rhjws\") pod \"redhat-operators-xxb82\" (UID: \"036b2b20-470d-4ea9-a0ef-10927444dfda\") " pod="openshift-marketplace/redhat-operators-xxb82"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.733105 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/036b2b20-470d-4ea9-a0ef-10927444dfda-catalog-content\") pod \"redhat-operators-xxb82\" (UID: \"036b2b20-470d-4ea9-a0ef-10927444dfda\") " pod="openshift-marketplace/redhat-operators-xxb82"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.836467 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/036b2b20-470d-4ea9-a0ef-10927444dfda-catalog-content\") pod \"redhat-operators-xxb82\" (UID: \"036b2b20-470d-4ea9-a0ef-10927444dfda\") " pod="openshift-marketplace/redhat-operators-xxb82"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.837134 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/036b2b20-470d-4ea9-a0ef-10927444dfda-utilities\") pod \"redhat-operators-xxb82\" (UID: \"036b2b20-470d-4ea9-a0ef-10927444dfda\") " pod="openshift-marketplace/redhat-operators-xxb82"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.837173 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhjws\" (UniqueName: \"kubernetes.io/projected/036b2b20-470d-4ea9-a0ef-10927444dfda-kube-api-access-rhjws\") pod \"redhat-operators-xxb82\" (UID: \"036b2b20-470d-4ea9-a0ef-10927444dfda\") " pod="openshift-marketplace/redhat-operators-xxb82"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.838185 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/036b2b20-470d-4ea9-a0ef-10927444dfda-catalog-content\") pod \"redhat-operators-xxb82\" (UID: \"036b2b20-470d-4ea9-a0ef-10927444dfda\") " pod="openshift-marketplace/redhat-operators-xxb82"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.838505 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/036b2b20-470d-4ea9-a0ef-10927444dfda-utilities\") pod \"redhat-operators-xxb82\" (UID: \"036b2b20-470d-4ea9-a0ef-10927444dfda\") " pod="openshift-marketplace/redhat-operators-xxb82"
Oct 11 03:57:37 crc kubenswrapper[4798]: I1011 03:57:37.865749 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhjws\" (UniqueName: \"kubernetes.io/projected/036b2b20-470d-4ea9-a0ef-10927444dfda-kube-api-access-rhjws\") pod \"redhat-operators-xxb82\" (UID: \"036b2b20-470d-4ea9-a0ef-10927444dfda\") " pod="openshift-marketplace/redhat-operators-xxb82"
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.056282 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xxb82"
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.364373 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"23f9fc02-cc93-408a-89a7-3698f24744dd","Type":"ContainerStarted","Data":"8ccb901883fb0a0d6361feab1f8f9390e2c3eb8fd887a1ab7a7486fd0a3e1735"}
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.371780 4798 generic.go:334] "Generic (PLEG): container finished" podID="de740119-5acc-48d0-aaa4-0a64e054cac8" containerID="73a656b9737f12fccc32e8e0238c9d5f5a29a6bb03abf7bc1d249a34631be68a" exitCode=0
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.371868 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvv66" event={"ID":"de740119-5acc-48d0-aaa4-0a64e054cac8","Type":"ContainerDied","Data":"73a656b9737f12fccc32e8e0238c9d5f5a29a6bb03abf7bc1d249a34631be68a"}
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.377271 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mlj2w"]
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.393758 4798 generic.go:334] "Generic (PLEG): container finished" podID="a45b7815-8c30-409d-96d9-c387b09c7d6b" containerID="ae8942f7ac5926ae9ddb20bf0a4e71c9f13cd6a90b97aaa54b4ff1d3f781ec2c" exitCode=0
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.393884 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wz94f" event={"ID":"a45b7815-8c30-409d-96d9-c387b09c7d6b","Type":"ContainerDied","Data":"ae8942f7ac5926ae9ddb20bf0a4e71c9f13cd6a90b97aaa54b4ff1d3f781ec2c"}
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.393921 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wz94f" event={"ID":"a45b7815-8c30-409d-96d9-c387b09c7d6b","Type":"ContainerStarted","Data":"d76e773b6f6762bc348c8e30432f2bf0c14be2dfbcef1c368cb69998da469d3e"}
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.394107 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.3940754269999998 podStartE2EDuration="2.394075427s" podCreationTimestamp="2025-10-11 03:57:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:38.391232261 +0000 UTC m=+153.727521937" watchObservedRunningTime="2025-10-11 03:57:38.394075427 +0000 UTC m=+153.730365113"
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.411360 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" event={"ID":"7b1d91a6-9b59-466c-a922-22825aac279b","Type":"ContainerStarted","Data":"ce4e7cc4cf2a388205de17dcb03bc4b4436ac73c02c91b5520a5b53aeb79e280"}
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.411445 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" event={"ID":"7b1d91a6-9b59-466c-a922-22825aac279b","Type":"ContainerStarted","Data":"fc2062458781ad220c015eee1187cd4892879287b02f7d33d96de621b30afb9c"}
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.412822 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8"
Oct 11 03:57:38 crc kubenswrapper[4798]: W1011 03:57:38.424294 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc27e187f_5d34_4455_9a63_8e3f213139bc.slice/crio-ef435e418ee8aa655df4dd191a6afb07ef992677311560b0f5aeed1f8bcf787e WatchSource:0}: Error finding container ef435e418ee8aa655df4dd191a6afb07ef992677311560b0f5aeed1f8bcf787e: Status 404 returned error can't find the container with id ef435e418ee8aa655df4dd191a6afb07ef992677311560b0f5aeed1f8bcf787e
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.425531 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8"
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.433978 4798 generic.go:334] "Generic (PLEG): container finished" podID="b2528278-dc60-43fd-8f3c-92182689a8d8" containerID="6030b38da46e27e469d83b63c399cca90866601f8ad2909bdcbfb1944bd75de9" exitCode=0
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.434093 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b2528278-dc60-43fd-8f3c-92182689a8d8","Type":"ContainerDied","Data":"6030b38da46e27e469d83b63c399cca90866601f8ad2909bdcbfb1944bd75de9"}
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.465783 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 03:57:38 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld
Oct 11 03:57:38 crc kubenswrapper[4798]: [+]process-running ok
Oct 11 03:57:38 crc kubenswrapper[4798]: healthz check failed
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.467751 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.489092 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xxb82"]
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.508496 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" podStartSLOduration=4.508465355 podStartE2EDuration="4.508465355s" podCreationTimestamp="2025-10-11 03:57:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:57:38.493653048 +0000 UTC m=+153.829942754" watchObservedRunningTime="2025-10-11 03:57:38.508465355 +0000 UTC m=+153.844755041"
Oct 11 03:57:38 crc kubenswrapper[4798]: I1011 03:57:38.545363 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z"
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.459612 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 03:57:39 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld
Oct 11 03:57:39 crc kubenswrapper[4798]: [+]process-running ok
Oct 11 03:57:39 crc kubenswrapper[4798]: healthz check failed
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.460229 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.460239 4798 generic.go:334] "Generic (PLEG): container finished" podID="23f9fc02-cc93-408a-89a7-3698f24744dd" containerID="8ccb901883fb0a0d6361feab1f8f9390e2c3eb8fd887a1ab7a7486fd0a3e1735" exitCode=0
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.460326 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"23f9fc02-cc93-408a-89a7-3698f24744dd","Type":"ContainerDied","Data":"8ccb901883fb0a0d6361feab1f8f9390e2c3eb8fd887a1ab7a7486fd0a3e1735"}
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.474536 4798 generic.go:334] "Generic (PLEG): container finished" podID="036b2b20-470d-4ea9-a0ef-10927444dfda" containerID="c64cb309e2d5d960144a816f02e5007ca9285307de0602169b39b7db7cf9a0ab" exitCode=0
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.474674 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xxb82" event={"ID":"036b2b20-470d-4ea9-a0ef-10927444dfda","Type":"ContainerDied","Data":"c64cb309e2d5d960144a816f02e5007ca9285307de0602169b39b7db7cf9a0ab"}
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.474714 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xxb82" event={"ID":"036b2b20-470d-4ea9-a0ef-10927444dfda","Type":"ContainerStarted","Data":"3898cb6361e16848ff6d6c5ea82700d86ec0d560943c789d5b4905f826f260f7"}
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.487484 4798 generic.go:334] "Generic (PLEG): container finished" podID="c27e187f-5d34-4455-9a63-8e3f213139bc" containerID="49aa1adfff2fd4438b35d1760053c7ecb92242a82ae6576cfeb4c84eddf99f09" exitCode=0
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.487569 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mlj2w" event={"ID":"c27e187f-5d34-4455-9a63-8e3f213139bc","Type":"ContainerDied","Data":"49aa1adfff2fd4438b35d1760053c7ecb92242a82ae6576cfeb4c84eddf99f09"}
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.487674 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mlj2w" event={"ID":"c27e187f-5d34-4455-9a63-8e3f213139bc","Type":"ContainerStarted","Data":"ef435e418ee8aa655df4dd191a6afb07ef992677311560b0f5aeed1f8bcf787e"}
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.874826 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.990676 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b2528278-dc60-43fd-8f3c-92182689a8d8-kubelet-dir\") pod \"b2528278-dc60-43fd-8f3c-92182689a8d8\" (UID: \"b2528278-dc60-43fd-8f3c-92182689a8d8\") "
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.990817 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b2528278-dc60-43fd-8f3c-92182689a8d8-kube-api-access\") pod \"b2528278-dc60-43fd-8f3c-92182689a8d8\" (UID: \"b2528278-dc60-43fd-8f3c-92182689a8d8\") "
Oct 11 03:57:39 crc kubenswrapper[4798]: I1011 03:57:39.991075 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2528278-dc60-43fd-8f3c-92182689a8d8-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "b2528278-dc60-43fd-8f3c-92182689a8d8" (UID: "b2528278-dc60-43fd-8f3c-92182689a8d8"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 11 03:57:40 crc kubenswrapper[4798]: I1011 03:57:40.007728 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2528278-dc60-43fd-8f3c-92182689a8d8-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "b2528278-dc60-43fd-8f3c-92182689a8d8" (UID: "b2528278-dc60-43fd-8f3c-92182689a8d8"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 03:57:40 crc kubenswrapper[4798]: I1011 03:57:40.092401 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b2528278-dc60-43fd-8f3c-92182689a8d8-kube-api-access\") on node \"crc\" DevicePath \"\""
Oct 11 03:57:40 crc kubenswrapper[4798]: I1011 03:57:40.092445 4798 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b2528278-dc60-43fd-8f3c-92182689a8d8-kubelet-dir\") on node \"crc\" DevicePath \"\""
Oct 11 03:57:40 crc kubenswrapper[4798]: I1011 03:57:40.460352 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 03:57:40 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld
Oct 11 03:57:40 crc kubenswrapper[4798]: [+]process-running ok
Oct 11 03:57:40 crc kubenswrapper[4798]: healthz check failed
Oct 11 03:57:40 crc kubenswrapper[4798]: I1011 03:57:40.460925 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 03:57:40 crc kubenswrapper[4798]: I1011 03:57:40.532114 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b2528278-dc60-43fd-8f3c-92182689a8d8","Type":"ContainerDied","Data":"20a5880c1815f4b514fc30778bb32ea0a910706ff78994089e9cc3ebd51805f3"}
Oct 11 03:57:40 crc kubenswrapper[4798]: I1011 03:57:40.532178 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20a5880c1815f4b514fc30778bb32ea0a910706ff78994089e9cc3ebd51805f3"
Oct 11 03:57:40 crc kubenswrapper[4798]: I1011 03:57:40.532244 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Oct 11 03:57:41 crc kubenswrapper[4798]: I1011 03:57:41.091220 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 11 03:57:41 crc kubenswrapper[4798]: I1011 03:57:41.220888 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/23f9fc02-cc93-408a-89a7-3698f24744dd-kube-api-access\") pod \"23f9fc02-cc93-408a-89a7-3698f24744dd\" (UID: \"23f9fc02-cc93-408a-89a7-3698f24744dd\") "
Oct 11 03:57:41 crc kubenswrapper[4798]: I1011 03:57:41.221304 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/23f9fc02-cc93-408a-89a7-3698f24744dd-kubelet-dir\") pod \"23f9fc02-cc93-408a-89a7-3698f24744dd\" (UID: \"23f9fc02-cc93-408a-89a7-3698f24744dd\") "
Oct 11 03:57:41 crc kubenswrapper[4798]: I1011 03:57:41.221421 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/23f9fc02-cc93-408a-89a7-3698f24744dd-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "23f9fc02-cc93-408a-89a7-3698f24744dd" (UID: "23f9fc02-cc93-408a-89a7-3698f24744dd"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 11 03:57:41 crc kubenswrapper[4798]: I1011 03:57:41.221939 4798 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/23f9fc02-cc93-408a-89a7-3698f24744dd-kubelet-dir\") on node \"crc\" DevicePath \"\""
Oct 11 03:57:41 crc kubenswrapper[4798]: I1011 03:57:41.229594 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23f9fc02-cc93-408a-89a7-3698f24744dd-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "23f9fc02-cc93-408a-89a7-3698f24744dd" (UID: "23f9fc02-cc93-408a-89a7-3698f24744dd"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 03:57:41 crc kubenswrapper[4798]: I1011 03:57:41.323588 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/23f9fc02-cc93-408a-89a7-3698f24744dd-kube-api-access\") on node \"crc\" DevicePath \"\""
Oct 11 03:57:41 crc kubenswrapper[4798]: I1011 03:57:41.458747 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 03:57:41 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld
Oct 11 03:57:41 crc kubenswrapper[4798]: [+]process-running ok
Oct 11 03:57:41 crc kubenswrapper[4798]: healthz check failed
Oct 11 03:57:41 crc kubenswrapper[4798]: I1011 03:57:41.458864 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 03:57:41 crc kubenswrapper[4798]: I1011 03:57:41.565088 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"23f9fc02-cc93-408a-89a7-3698f24744dd","Type":"ContainerDied","Data":"125c1c2c260c23c35d1da8493f2f4b14e8f332bd91df9ce147f37f3635f7b3c4"}
Oct 11 03:57:41 crc kubenswrapper[4798]: I1011 03:57:41.565144 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="125c1c2c260c23c35d1da8493f2f4b14e8f332bd91df9ce147f37f3635f7b3c4"
Oct 11 03:57:41 crc kubenswrapper[4798]: I1011 03:57:41.565183 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 11 03:57:42 crc kubenswrapper[4798]: I1011 03:57:42.456905 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 03:57:42 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld
Oct 11 03:57:42 crc kubenswrapper[4798]: [+]process-running ok
Oct 11 03:57:42 crc kubenswrapper[4798]: healthz check failed
Oct 11 03:57:42 crc kubenswrapper[4798]: I1011 03:57:42.456983 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 03:57:42 crc kubenswrapper[4798]: I1011 03:57:42.674479 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-z5h6j"
Oct 11 03:57:43 crc kubenswrapper[4798]: I1011 03:57:43.458365 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 03:57:43 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld
Oct 11 03:57:43 crc kubenswrapper[4798]: [+]process-running ok
Oct 11 03:57:43 crc kubenswrapper[4798]: healthz check failed
Oct 11 03:57:43 crc kubenswrapper[4798]: I1011 03:57:43.458520 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 03:57:44 crc kubenswrapper[4798]: I1011 03:57:44.459140 4798 patch_prober.go:28] interesting pod/router-default-5444994796-kgrgq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 03:57:44 crc kubenswrapper[4798]: [-]has-synced failed: reason withheld
Oct 11 03:57:44 crc kubenswrapper[4798]: [+]process-running ok
Oct 11 03:57:44 crc kubenswrapper[4798]: healthz check failed
Oct 11 03:57:44 crc kubenswrapper[4798]: I1011 03:57:44.461107 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-kgrgq" podUID="78405f10-b1f1-4401-8a3e-579ff5a739e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 03:57:45 crc kubenswrapper[4798]: I1011 03:57:45.457831 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-kgrgq"
Oct 11 03:57:45 crc kubenswrapper[4798]: I1011 03:57:45.460833 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-kgrgq"
Oct 11 03:57:46 crc kubenswrapper[4798]: I1011 03:57:46.661062 4798 patch_prober.go:28] interesting pod/console-f9d7485db-5lpj9 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body=
Oct 11 03:57:46 crc kubenswrapper[4798]: I1011 03:57:46.661668 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-5lpj9" podUID="79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused"
failed" probeType="Startup" pod="openshift-console/console-f9d7485db-5lpj9" podUID="79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" Oct 11 03:57:47 crc kubenswrapper[4798]: I1011 03:57:47.486782 4798 patch_prober.go:28] interesting pod/downloads-7954f5f757-g2k9v container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Oct 11 03:57:47 crc kubenswrapper[4798]: I1011 03:57:47.487208 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-g2k9v" podUID="55408d92-301d-4b54-9cb1-09dc11423c33" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Oct 11 03:57:47 crc kubenswrapper[4798]: I1011 03:57:47.486823 4798 patch_prober.go:28] interesting pod/downloads-7954f5f757-g2k9v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Oct 11 03:57:47 crc kubenswrapper[4798]: I1011 03:57:47.487324 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-g2k9v" podUID="55408d92-301d-4b54-9cb1-09dc11423c33" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Oct 11 03:57:47 crc kubenswrapper[4798]: I1011 03:57:47.538302 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:47 crc kubenswrapper[4798]: I1011 03:57:47.545946 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3cf2b185-9a26-4448-8fc5-4885f98daf87-metrics-certs\") pod \"network-metrics-daemon-5bfzt\" (UID: \"3cf2b185-9a26-4448-8fc5-4885f98daf87\") " pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:47 crc kubenswrapper[4798]: I1011 03:57:47.639957 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5bfzt" Oct 11 03:57:55 crc kubenswrapper[4798]: I1011 03:57:55.280659 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:57:56 crc kubenswrapper[4798]: I1011 03:57:56.668477 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:56 crc kubenswrapper[4798]: I1011 03:57:56.677446 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 03:57:57 crc kubenswrapper[4798]: I1011 03:57:57.138614 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 03:57:57 crc kubenswrapper[4798]: I1011 03:57:57.138698 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 03:57:57 crc kubenswrapper[4798]: I1011 03:57:57.493819 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-g2k9v" Oct 11 03:58:05 crc kubenswrapper[4798]: E1011 03:58:05.823966 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Oct 11 03:58:05 crc kubenswrapper[4798]: E1011 03:58:05.825233 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-snwss,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start 
failed in pod certified-operators-7k4v2_openshift-marketplace(08878c03-1346-41ac-9f4f-874cd48e2129): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Oct 11 03:58:05 crc kubenswrapper[4798]: E1011 03:58:05.826486 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-7k4v2" podUID="08878c03-1346-41ac-9f4f-874cd48e2129" Oct 11 03:58:07 crc kubenswrapper[4798]: E1011 03:58:07.429924 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Oct 11 03:58:07 crc kubenswrapper[4798]: E1011 03:58:07.430108 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wlq27,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-n8zfl_openshift-marketplace(b37ceeac-8a39-4c11-8e4a-51047efe0da4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Oct 11 03:58:07 crc kubenswrapper[4798]: E1011 03:58:07.431244 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-n8zfl" podUID="b37ceeac-8a39-4c11-8e4a-51047efe0da4" Oct 11 03:58:07 crc kubenswrapper[4798]: I1011 03:58:07.868257 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jtrv7" Oct 11 03:58:07 crc kubenswrapper[4798]: E1011 
Oct 11 03:58:07 crc kubenswrapper[4798]: E1011 03:58:07.918087 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-7k4v2" podUID="08878c03-1346-41ac-9f4f-874cd48e2129"
Oct 11 03:58:09 crc kubenswrapper[4798]: E1011 03:58:09.047758 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Oct 11 03:58:09 crc kubenswrapper[4798]: E1011 03:58:09.048623 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-72rr6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-x4ngf_openshift-marketplace(07406786-26c9-42e3-9602-64c9cbe7f235): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 11 03:58:09 crc kubenswrapper[4798]: E1011 03:58:09.049941 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-x4ngf" podUID="07406786-26c9-42e3-9602-64c9cbe7f235"
Oct 11 03:58:13 crc kubenswrapper[4798]: I1011 03:58:13.884830 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 03:58:14 crc kubenswrapper[4798]: E1011 03:58:14.375128 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-x4ngf" podUID="07406786-26c9-42e3-9602-64c9cbe7f235"
Oct 11 03:58:16 crc kubenswrapper[4798]: E1011 03:58:16.451591 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Oct 11 03:58:16 crc kubenswrapper[4798]: E1011 03:58:16.451792 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rhjws,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-xxb82_openshift-marketplace(036b2b20-470d-4ea9-a0ef-10927444dfda): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 11 03:58:16 crc kubenswrapper[4798]: E1011 03:58:16.453018 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-xxb82" podUID="036b2b20-470d-4ea9-a0ef-10927444dfda"
Oct 11 03:58:17 crc kubenswrapper[4798]: E1011 03:58:17.482743 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-xxb82" podUID="036b2b20-470d-4ea9-a0ef-10927444dfda"
Oct 11 03:58:17 crc kubenswrapper[4798]: I1011 03:58:17.825939 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-5bfzt"]
Oct 11 03:58:17 crc kubenswrapper[4798]: I1011 03:58:17.847694 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" event={"ID":"3cf2b185-9a26-4448-8fc5-4885f98daf87","Type":"ContainerStarted","Data":"01272021f733f44a378796649b8df602453d2475fe868a7b9a639f93e6c78d55"}
Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.201630 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.201872 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9455d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-wz94f_openshift-marketplace(a45b7815-8c30-409d-96d9-c387b09c7d6b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.203003 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-wz94f" podUID="a45b7815-8c30-409d-96d9-c387b09c7d6b"
Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.416446 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.416959 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="init container
&Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g48hs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-l8bxj_openshift-marketplace(5df1000c-7c15-48cd-adcc-0c286e651fad): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.418189 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-l8bxj" podUID="5df1000c-7c15-48cd-adcc-0c286e651fad" Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.442243 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.442660 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c2g9n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-mlj2w_openshift-marketplace(c27e187f-5d34-4455-9a63-8e3f213139bc): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.443871 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-mlj2w" podUID="c27e187f-5d34-4455-9a63-8e3f213139bc" Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.490615 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.490875 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nrgv5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-mvv66_openshift-marketplace(de740119-5acc-48d0-aaa4-0a64e054cac8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.492702 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-mvv66" podUID="de740119-5acc-48d0-aaa4-0a64e054cac8" Oct 11 03:58:19 crc kubenswrapper[4798]: I1011 03:58:19.867731 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" event={"ID":"3cf2b185-9a26-4448-8fc5-4885f98daf87","Type":"ContainerStarted","Data":"29eea18f703bdbe7e6931c6d966a200f612954ef759df2dc11254c7c90db242d"} Oct 11 03:58:19 crc kubenswrapper[4798]: I1011 03:58:19.868218 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-5bfzt" event={"ID":"3cf2b185-9a26-4448-8fc5-4885f98daf87","Type":"ContainerStarted","Data":"4ae454d9c2c9efd2c45e6d3de07907194f3455219935e5f855057ac4270de9c1"} Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.869901 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-wz94f" podUID="a45b7815-8c30-409d-96d9-c387b09c7d6b" Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.870440 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-mlj2w" podUID="c27e187f-5d34-4455-9a63-8e3f213139bc" Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.872136 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-l8bxj" podUID="5df1000c-7c15-48cd-adcc-0c286e651fad" Oct 11 03:58:19 crc kubenswrapper[4798]: E1011 03:58:19.872238 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-mvv66" podUID="de740119-5acc-48d0-aaa4-0a64e054cac8" Oct 11 03:58:20 crc kubenswrapper[4798]: I1011 03:58:20.451133 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-5bfzt" podStartSLOduration=175.451100592 podStartE2EDuration="2m55.451100592s" podCreationTimestamp="2025-10-11 03:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:58:19.992243621 +0000 UTC m=+195.328533317" watchObservedRunningTime="2025-10-11 03:58:20.451100592 +0000 UTC m=+195.787390278" Oct 11 03:58:21 crc kubenswrapper[4798]: I1011 03:58:21.884019 4798 generic.go:334] "Generic (PLEG): container finished" podID="08878c03-1346-41ac-9f4f-874cd48e2129" containerID="f5ee9e499c97b5b304f4c8253e94df3aaca4553ae12ed04782c2c3841182f4e5" exitCode=0 Oct 11 03:58:21 crc kubenswrapper[4798]: I1011 03:58:21.884150 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7k4v2" event={"ID":"08878c03-1346-41ac-9f4f-874cd48e2129","Type":"ContainerDied","Data":"f5ee9e499c97b5b304f4c8253e94df3aaca4553ae12ed04782c2c3841182f4e5"} Oct 11 03:58:22 crc kubenswrapper[4798]: I1011 03:58:22.897170 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7k4v2" event={"ID":"08878c03-1346-41ac-9f4f-874cd48e2129","Type":"ContainerStarted","Data":"cc9e901028fc84af065c9375550e0e1d67d2ba4280feb013c3d3cac12ad60489"} Oct 11 03:58:22 crc kubenswrapper[4798]: I1011 03:58:22.927515 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7k4v2" podStartSLOduration=2.832377482 podStartE2EDuration="48.927495164s" podCreationTimestamp="2025-10-11 03:57:34 +0000 UTC" firstStartedPulling="2025-10-11 03:57:36.238717914 +0000 UTC m=+151.575007600" lastFinishedPulling="2025-10-11 03:58:22.333835586 +0000 UTC m=+197.670125282" observedRunningTime="2025-10-11 03:58:22.92601699 +0000 UTC m=+198.262306716" watchObservedRunningTime="2025-10-11 03:58:22.927495164 +0000 UTC m=+198.263784840" Oct 11 03:58:24 crc kubenswrapper[4798]: I1011 03:58:24.835651 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7k4v2" Oct 11 03:58:24 crc kubenswrapper[4798]: I1011 03:58:24.837589 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7k4v2" Oct 11 03:58:24 crc kubenswrapper[4798]: I1011 03:58:24.915071 4798 generic.go:334] "Generic (PLEG): container finished" podID="b37ceeac-8a39-4c11-8e4a-51047efe0da4" containerID="f01cf1bb88a48a066aae33922c49709b08da0d8c68317991fbbb857d4a777f0f" exitCode=0 Oct 11 03:58:24 crc kubenswrapper[4798]: I1011 03:58:24.916072 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8zfl" 
event={"ID":"b37ceeac-8a39-4c11-8e4a-51047efe0da4","Type":"ContainerDied","Data":"f01cf1bb88a48a066aae33922c49709b08da0d8c68317991fbbb857d4a777f0f"} Oct 11 03:58:25 crc kubenswrapper[4798]: I1011 03:58:25.115202 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7k4v2" Oct 11 03:58:25 crc kubenswrapper[4798]: I1011 03:58:25.927373 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8zfl" event={"ID":"b37ceeac-8a39-4c11-8e4a-51047efe0da4","Type":"ContainerStarted","Data":"bf33f07be426f06dcec8d49a849c17e4c147b86c619b5262c3261f5eb25279ae"} Oct 11 03:58:25 crc kubenswrapper[4798]: I1011 03:58:25.963887 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-n8zfl" podStartSLOduration=2.847116544 podStartE2EDuration="51.963856806s" podCreationTimestamp="2025-10-11 03:57:34 +0000 UTC" firstStartedPulling="2025-10-11 03:57:36.236454126 +0000 UTC m=+151.572743802" lastFinishedPulling="2025-10-11 03:58:25.353194338 +0000 UTC m=+200.689484064" observedRunningTime="2025-10-11 03:58:25.958741105 +0000 UTC m=+201.295030791" watchObservedRunningTime="2025-10-11 03:58:25.963856806 +0000 UTC m=+201.300146532" Oct 11 03:58:27 crc kubenswrapper[4798]: I1011 03:58:27.138897 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 03:58:27 crc kubenswrapper[4798]: I1011 03:58:27.139528 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 03:58:27 crc kubenswrapper[4798]: I1011 03:58:27.139619 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 03:58:27 crc kubenswrapper[4798]: I1011 03:58:27.140798 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 03:58:27 crc kubenswrapper[4798]: I1011 03:58:27.140925 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d" gracePeriod=600 Oct 11 03:58:27 crc kubenswrapper[4798]: E1011 03:58:27.350228 4798 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod42571bc8_2186_4e3b_bba9_28f5a8f364d0.slice/crio-conmon-d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d.scope\": RecentStats: unable to find data in memory cache]" Oct 11 03:58:27 crc kubenswrapper[4798]: I1011 
03:58:27.944955 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerID="d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d" exitCode=0 Oct 11 03:58:27 crc kubenswrapper[4798]: I1011 03:58:27.945072 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d"} Oct 11 03:58:27 crc kubenswrapper[4798]: I1011 03:58:27.945436 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"166d4bff6ae77be3247b1d1d53d8215160cadf3101be02557d941e30fb2763c3"} Oct 11 03:58:30 crc kubenswrapper[4798]: I1011 03:58:30.968714 4798 generic.go:334] "Generic (PLEG): container finished" podID="07406786-26c9-42e3-9602-64c9cbe7f235" containerID="8109bfa453061ec223baf6735b13be6be983c67bb8494929e7bdb5b83cda8867" exitCode=0 Oct 11 03:58:30 crc kubenswrapper[4798]: I1011 03:58:30.969528 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x4ngf" event={"ID":"07406786-26c9-42e3-9602-64c9cbe7f235","Type":"ContainerDied","Data":"8109bfa453061ec223baf6735b13be6be983c67bb8494929e7bdb5b83cda8867"} Oct 11 03:58:31 crc kubenswrapper[4798]: I1011 03:58:31.979180 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xxb82" event={"ID":"036b2b20-470d-4ea9-a0ef-10927444dfda","Type":"ContainerStarted","Data":"f532b1c4db33ca16a839c5a880b7fe8a634125664860fa2f4c0fb7f6c329eeb2"} Oct 11 03:58:31 crc kubenswrapper[4798]: I1011 03:58:31.982746 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x4ngf" event={"ID":"07406786-26c9-42e3-9602-64c9cbe7f235","Type":"ContainerStarted","Data":"815d6c4f81f95befe3e0b31b2244cfa71998a12724b3ee31d22e4dbfac7a0e72"} Oct 11 03:58:32 crc kubenswrapper[4798]: I1011 03:58:32.991120 4798 generic.go:334] "Generic (PLEG): container finished" podID="036b2b20-470d-4ea9-a0ef-10927444dfda" containerID="f532b1c4db33ca16a839c5a880b7fe8a634125664860fa2f4c0fb7f6c329eeb2" exitCode=0 Oct 11 03:58:32 crc kubenswrapper[4798]: I1011 03:58:32.991249 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xxb82" event={"ID":"036b2b20-470d-4ea9-a0ef-10927444dfda","Type":"ContainerDied","Data":"f532b1c4db33ca16a839c5a880b7fe8a634125664860fa2f4c0fb7f6c329eeb2"} Oct 11 03:58:33 crc kubenswrapper[4798]: I1011 03:58:33.020186 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x4ngf" podStartSLOduration=3.817511347 podStartE2EDuration="59.020166447s" podCreationTimestamp="2025-10-11 03:57:34 +0000 UTC" firstStartedPulling="2025-10-11 03:57:36.242283911 +0000 UTC m=+151.578573597" lastFinishedPulling="2025-10-11 03:58:31.444939011 +0000 UTC m=+206.781228697" observedRunningTime="2025-10-11 03:58:33.017633062 +0000 UTC m=+208.353922748" watchObservedRunningTime="2025-10-11 03:58:33.020166447 +0000 UTC m=+208.356456133" Oct 11 03:58:34 crc kubenswrapper[4798]: I1011 03:58:34.002319 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xxb82" 
event={"ID":"036b2b20-470d-4ea9-a0ef-10927444dfda","Type":"ContainerStarted","Data":"30e8039c1efde14c73bdc2abff8774461aef57211dfca9a6ca4ee08eaf419a7c"} Oct 11 03:58:34 crc kubenswrapper[4798]: I1011 03:58:34.007195 4798 generic.go:334] "Generic (PLEG): container finished" podID="c27e187f-5d34-4455-9a63-8e3f213139bc" containerID="6d0807f660ce06a144606531ffe289b31590cba612f84f6ffde035e2e3245316" exitCode=0 Oct 11 03:58:34 crc kubenswrapper[4798]: I1011 03:58:34.007267 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mlj2w" event={"ID":"c27e187f-5d34-4455-9a63-8e3f213139bc","Type":"ContainerDied","Data":"6d0807f660ce06a144606531ffe289b31590cba612f84f6ffde035e2e3245316"} Oct 11 03:58:34 crc kubenswrapper[4798]: I1011 03:58:34.014145 4798 generic.go:334] "Generic (PLEG): container finished" podID="a45b7815-8c30-409d-96d9-c387b09c7d6b" containerID="fd16003dfc4324eb16c16e3408df5eff42c9fe5c786b4dd631f0d1ea53944161" exitCode=0 Oct 11 03:58:34 crc kubenswrapper[4798]: I1011 03:58:34.014229 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wz94f" event={"ID":"a45b7815-8c30-409d-96d9-c387b09c7d6b","Type":"ContainerDied","Data":"fd16003dfc4324eb16c16e3408df5eff42c9fe5c786b4dd631f0d1ea53944161"} Oct 11 03:58:34 crc kubenswrapper[4798]: I1011 03:58:34.036150 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xxb82" podStartSLOduration=3.077546331 podStartE2EDuration="57.036094396s" podCreationTimestamp="2025-10-11 03:57:37 +0000 UTC" firstStartedPulling="2025-10-11 03:57:39.481667348 +0000 UTC m=+154.817957034" lastFinishedPulling="2025-10-11 03:58:33.440215423 +0000 UTC m=+208.776505099" observedRunningTime="2025-10-11 03:58:34.033084507 +0000 UTC m=+209.369374213" watchObservedRunningTime="2025-10-11 03:58:34.036094396 +0000 UTC m=+209.372384092" Oct 11 03:58:34 crc kubenswrapper[4798]: I1011 03:58:34.887119 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7k4v2" Oct 11 03:58:34 crc kubenswrapper[4798]: I1011 03:58:34.904633 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:58:34 crc kubenswrapper[4798]: I1011 03:58:34.904686 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:58:35 crc kubenswrapper[4798]: I1011 03:58:35.011075 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:58:35 crc kubenswrapper[4798]: I1011 03:58:35.022097 4798 generic.go:334] "Generic (PLEG): container finished" podID="5df1000c-7c15-48cd-adcc-0c286e651fad" containerID="8c80ff94306640d8cabe32af18507336b3a88695d320bc359824476a4482bc08" exitCode=0 Oct 11 03:58:35 crc kubenswrapper[4798]: I1011 03:58:35.022201 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l8bxj" event={"ID":"5df1000c-7c15-48cd-adcc-0c286e651fad","Type":"ContainerDied","Data":"8c80ff94306640d8cabe32af18507336b3a88695d320bc359824476a4482bc08"} Oct 11 03:58:35 crc kubenswrapper[4798]: I1011 03:58:35.024805 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mlj2w" 
event={"ID":"c27e187f-5d34-4455-9a63-8e3f213139bc","Type":"ContainerStarted","Data":"a9d117cb51c4ad6aa00b2cd2223a188d200bccdc723ffbcf9600962b2c855743"} Oct 11 03:58:35 crc kubenswrapper[4798]: I1011 03:58:35.025948 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:58:35 crc kubenswrapper[4798]: I1011 03:58:35.026571 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:58:35 crc kubenswrapper[4798]: I1011 03:58:35.071461 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:58:35 crc kubenswrapper[4798]: I1011 03:58:35.081046 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mlj2w" podStartSLOduration=3.143680053 podStartE2EDuration="58.081030999s" podCreationTimestamp="2025-10-11 03:57:37 +0000 UTC" firstStartedPulling="2025-10-11 03:57:39.490240316 +0000 UTC m=+154.826530002" lastFinishedPulling="2025-10-11 03:58:34.427591252 +0000 UTC m=+209.763880948" observedRunningTime="2025-10-11 03:58:35.078097232 +0000 UTC m=+210.414386918" watchObservedRunningTime="2025-10-11 03:58:35.081030999 +0000 UTC m=+210.417320675" Oct 11 03:58:36 crc kubenswrapper[4798]: I1011 03:58:36.036541 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wz94f" event={"ID":"a45b7815-8c30-409d-96d9-c387b09c7d6b","Type":"ContainerStarted","Data":"8ec5049297124f0bcb28669c65f3892430a1f9e436575736523aede24854d8e3"} Oct 11 03:58:36 crc kubenswrapper[4798]: I1011 03:58:36.063845 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wz94f" podStartSLOduration=3.9133335970000003 podStartE2EDuration="1m0.063823763s" podCreationTimestamp="2025-10-11 03:57:36 +0000 UTC" firstStartedPulling="2025-10-11 03:57:38.396169281 +0000 UTC m=+153.732458967" lastFinishedPulling="2025-10-11 03:58:34.546659447 +0000 UTC m=+209.882949133" observedRunningTime="2025-10-11 03:58:36.061606928 +0000 UTC m=+211.397896614" watchObservedRunningTime="2025-10-11 03:58:36.063823763 +0000 UTC m=+211.400113449" Oct 11 03:58:36 crc kubenswrapper[4798]: I1011 03:58:36.099636 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:58:37 crc kubenswrapper[4798]: I1011 03:58:37.018638 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:58:37 crc kubenswrapper[4798]: I1011 03:58:37.019119 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:58:37 crc kubenswrapper[4798]: I1011 03:58:37.063195 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:58:37 crc kubenswrapper[4798]: I1011 03:58:37.066073 4798 generic.go:334] "Generic (PLEG): container finished" podID="de740119-5acc-48d0-aaa4-0a64e054cac8" containerID="3635947bc7598f7b73d8b70fcbb228c5aa862eaa9a1dd8b93e7af0d379d14d99" exitCode=0 Oct 11 03:58:37 crc kubenswrapper[4798]: I1011 03:58:37.066194 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvv66" 
event={"ID":"de740119-5acc-48d0-aaa4-0a64e054cac8","Type":"ContainerDied","Data":"3635947bc7598f7b73d8b70fcbb228c5aa862eaa9a1dd8b93e7af0d379d14d99"} Oct 11 03:58:37 crc kubenswrapper[4798]: I1011 03:58:37.075047 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l8bxj" event={"ID":"5df1000c-7c15-48cd-adcc-0c286e651fad","Type":"ContainerStarted","Data":"25402c0fe51dc0118b2de6a1cedb19603b70fe7057bd26b5b115a6c96d3424a0"} Oct 11 03:58:37 crc kubenswrapper[4798]: I1011 03:58:37.110924 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l8bxj" podStartSLOduration=3.262259202 podStartE2EDuration="1m3.110900159s" podCreationTimestamp="2025-10-11 03:57:34 +0000 UTC" firstStartedPulling="2025-10-11 03:57:36.246518799 +0000 UTC m=+151.582808485" lastFinishedPulling="2025-10-11 03:58:36.095159756 +0000 UTC m=+211.431449442" observedRunningTime="2025-10-11 03:58:37.110283622 +0000 UTC m=+212.446573328" watchObservedRunningTime="2025-10-11 03:58:37.110900159 +0000 UTC m=+212.447189845" Oct 11 03:58:37 crc kubenswrapper[4798]: I1011 03:58:37.672094 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mlj2w" Oct 11 03:58:37 crc kubenswrapper[4798]: I1011 03:58:37.672611 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mlj2w" Oct 11 03:58:38 crc kubenswrapper[4798]: I1011 03:58:38.057779 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xxb82" Oct 11 03:58:38 crc kubenswrapper[4798]: I1011 03:58:38.057993 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xxb82" Oct 11 03:58:38 crc kubenswrapper[4798]: I1011 03:58:38.095846 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvv66" event={"ID":"de740119-5acc-48d0-aaa4-0a64e054cac8","Type":"ContainerStarted","Data":"c2b5a728290701acd3977918422f343f12e40c5d6926425a845cce330bd70c57"} Oct 11 03:58:38 crc kubenswrapper[4798]: I1011 03:58:38.118425 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mvv66" podStartSLOduration=2.961367297 podStartE2EDuration="1m2.11837527s" podCreationTimestamp="2025-10-11 03:57:36 +0000 UTC" firstStartedPulling="2025-10-11 03:57:38.37758026 +0000 UTC m=+153.713869946" lastFinishedPulling="2025-10-11 03:58:37.534588223 +0000 UTC m=+212.870877919" observedRunningTime="2025-10-11 03:58:38.113895319 +0000 UTC m=+213.450185015" watchObservedRunningTime="2025-10-11 03:58:38.11837527 +0000 UTC m=+213.454664956" Oct 11 03:58:38 crc kubenswrapper[4798]: I1011 03:58:38.705727 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n8zfl"] Oct 11 03:58:38 crc kubenswrapper[4798]: I1011 03:58:38.720034 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mlj2w" podUID="c27e187f-5d34-4455-9a63-8e3f213139bc" containerName="registry-server" probeResult="failure" output=< Oct 11 03:58:38 crc kubenswrapper[4798]: timeout: failed to connect service ":50051" within 1s Oct 11 03:58:38 crc kubenswrapper[4798]: > Oct 11 03:58:39 crc kubenswrapper[4798]: I1011 03:58:39.103336 4798 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/certified-operators-n8zfl" podUID="b37ceeac-8a39-4c11-8e4a-51047efe0da4" containerName="registry-server" containerID="cri-o://bf33f07be426f06dcec8d49a849c17e4c147b86c619b5262c3261f5eb25279ae" gracePeriod=2 Oct 11 03:58:39 crc kubenswrapper[4798]: I1011 03:58:39.111256 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xxb82" podUID="036b2b20-470d-4ea9-a0ef-10927444dfda" containerName="registry-server" probeResult="failure" output=< Oct 11 03:58:39 crc kubenswrapper[4798]: timeout: failed to connect service ":50051" within 1s Oct 11 03:58:39 crc kubenswrapper[4798]: > Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:39.987883 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.079762 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b37ceeac-8a39-4c11-8e4a-51047efe0da4-utilities\") pod \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\" (UID: \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\") " Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.079822 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b37ceeac-8a39-4c11-8e4a-51047efe0da4-catalog-content\") pod \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\" (UID: \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\") " Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.079851 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlq27\" (UniqueName: \"kubernetes.io/projected/b37ceeac-8a39-4c11-8e4a-51047efe0da4-kube-api-access-wlq27\") pod \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\" (UID: \"b37ceeac-8a39-4c11-8e4a-51047efe0da4\") " Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.080862 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b37ceeac-8a39-4c11-8e4a-51047efe0da4-utilities" (OuterVolumeSpecName: "utilities") pod "b37ceeac-8a39-4c11-8e4a-51047efe0da4" (UID: "b37ceeac-8a39-4c11-8e4a-51047efe0da4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.087599 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b37ceeac-8a39-4c11-8e4a-51047efe0da4-kube-api-access-wlq27" (OuterVolumeSpecName: "kube-api-access-wlq27") pod "b37ceeac-8a39-4c11-8e4a-51047efe0da4" (UID: "b37ceeac-8a39-4c11-8e4a-51047efe0da4"). InnerVolumeSpecName "kube-api-access-wlq27". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.113915 4798 generic.go:334] "Generic (PLEG): container finished" podID="b37ceeac-8a39-4c11-8e4a-51047efe0da4" containerID="bf33f07be426f06dcec8d49a849c17e4c147b86c619b5262c3261f5eb25279ae" exitCode=0 Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.113998 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8zfl" event={"ID":"b37ceeac-8a39-4c11-8e4a-51047efe0da4","Type":"ContainerDied","Data":"bf33f07be426f06dcec8d49a849c17e4c147b86c619b5262c3261f5eb25279ae"} Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.114086 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8zfl" event={"ID":"b37ceeac-8a39-4c11-8e4a-51047efe0da4","Type":"ContainerDied","Data":"dbf2289716cfd47493ff89b4b3ad329e13f380bcb773a11282c5e8524377ef80"} Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.114102 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n8zfl" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.114112 4798 scope.go:117] "RemoveContainer" containerID="bf33f07be426f06dcec8d49a849c17e4c147b86c619b5262c3261f5eb25279ae" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.133198 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b37ceeac-8a39-4c11-8e4a-51047efe0da4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b37ceeac-8a39-4c11-8e4a-51047efe0da4" (UID: "b37ceeac-8a39-4c11-8e4a-51047efe0da4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.133993 4798 scope.go:117] "RemoveContainer" containerID="f01cf1bb88a48a066aae33922c49709b08da0d8c68317991fbbb857d4a777f0f" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.157275 4798 scope.go:117] "RemoveContainer" containerID="e9a0e48e65a5ffb8d4e9fcd7eedaa1687ad26db4e1d0ed0ae59c27722254bc14" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.175913 4798 scope.go:117] "RemoveContainer" containerID="bf33f07be426f06dcec8d49a849c17e4c147b86c619b5262c3261f5eb25279ae" Oct 11 03:58:40 crc kubenswrapper[4798]: E1011 03:58:40.176819 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf33f07be426f06dcec8d49a849c17e4c147b86c619b5262c3261f5eb25279ae\": container with ID starting with bf33f07be426f06dcec8d49a849c17e4c147b86c619b5262c3261f5eb25279ae not found: ID does not exist" containerID="bf33f07be426f06dcec8d49a849c17e4c147b86c619b5262c3261f5eb25279ae" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.176866 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf33f07be426f06dcec8d49a849c17e4c147b86c619b5262c3261f5eb25279ae"} err="failed to get container status \"bf33f07be426f06dcec8d49a849c17e4c147b86c619b5262c3261f5eb25279ae\": rpc error: code = NotFound desc = could not find container \"bf33f07be426f06dcec8d49a849c17e4c147b86c619b5262c3261f5eb25279ae\": container with ID starting with bf33f07be426f06dcec8d49a849c17e4c147b86c619b5262c3261f5eb25279ae not found: ID does not exist" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.176896 4798 scope.go:117] "RemoveContainer" containerID="f01cf1bb88a48a066aae33922c49709b08da0d8c68317991fbbb857d4a777f0f" Oct 
11 03:58:40 crc kubenswrapper[4798]: E1011 03:58:40.177240 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f01cf1bb88a48a066aae33922c49709b08da0d8c68317991fbbb857d4a777f0f\": container with ID starting with f01cf1bb88a48a066aae33922c49709b08da0d8c68317991fbbb857d4a777f0f not found: ID does not exist" containerID="f01cf1bb88a48a066aae33922c49709b08da0d8c68317991fbbb857d4a777f0f" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.177295 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f01cf1bb88a48a066aae33922c49709b08da0d8c68317991fbbb857d4a777f0f"} err="failed to get container status \"f01cf1bb88a48a066aae33922c49709b08da0d8c68317991fbbb857d4a777f0f\": rpc error: code = NotFound desc = could not find container \"f01cf1bb88a48a066aae33922c49709b08da0d8c68317991fbbb857d4a777f0f\": container with ID starting with f01cf1bb88a48a066aae33922c49709b08da0d8c68317991fbbb857d4a777f0f not found: ID does not exist" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.177330 4798 scope.go:117] "RemoveContainer" containerID="e9a0e48e65a5ffb8d4e9fcd7eedaa1687ad26db4e1d0ed0ae59c27722254bc14" Oct 11 03:58:40 crc kubenswrapper[4798]: E1011 03:58:40.177691 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9a0e48e65a5ffb8d4e9fcd7eedaa1687ad26db4e1d0ed0ae59c27722254bc14\": container with ID starting with e9a0e48e65a5ffb8d4e9fcd7eedaa1687ad26db4e1d0ed0ae59c27722254bc14 not found: ID does not exist" containerID="e9a0e48e65a5ffb8d4e9fcd7eedaa1687ad26db4e1d0ed0ae59c27722254bc14" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.177714 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9a0e48e65a5ffb8d4e9fcd7eedaa1687ad26db4e1d0ed0ae59c27722254bc14"} err="failed to get container status \"e9a0e48e65a5ffb8d4e9fcd7eedaa1687ad26db4e1d0ed0ae59c27722254bc14\": rpc error: code = NotFound desc = could not find container \"e9a0e48e65a5ffb8d4e9fcd7eedaa1687ad26db4e1d0ed0ae59c27722254bc14\": container with ID starting with e9a0e48e65a5ffb8d4e9fcd7eedaa1687ad26db4e1d0ed0ae59c27722254bc14 not found: ID does not exist" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.181538 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b37ceeac-8a39-4c11-8e4a-51047efe0da4-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.181559 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b37ceeac-8a39-4c11-8e4a-51047efe0da4-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.181572 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wlq27\" (UniqueName: \"kubernetes.io/projected/b37ceeac-8a39-4c11-8e4a-51047efe0da4-kube-api-access-wlq27\") on node \"crc\" DevicePath \"\"" Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.454559 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n8zfl"] Oct 11 03:58:40 crc kubenswrapper[4798]: I1011 03:58:40.459127 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-n8zfl"] Oct 11 03:58:41 crc kubenswrapper[4798]: I1011 03:58:41.431695 4798 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="b37ceeac-8a39-4c11-8e4a-51047efe0da4" path="/var/lib/kubelet/pods/b37ceeac-8a39-4c11-8e4a-51047efe0da4/volumes" Oct 11 03:58:44 crc kubenswrapper[4798]: I1011 03:58:44.518519 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l8bxj" Oct 11 03:58:44 crc kubenswrapper[4798]: I1011 03:58:44.518957 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l8bxj" Oct 11 03:58:44 crc kubenswrapper[4798]: I1011 03:58:44.582817 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-l8bxj" Oct 11 03:58:44 crc kubenswrapper[4798]: I1011 03:58:44.950534 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:58:45 crc kubenswrapper[4798]: I1011 03:58:45.005228 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x4ngf"] Oct 11 03:58:45 crc kubenswrapper[4798]: I1011 03:58:45.148731 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-x4ngf" podUID="07406786-26c9-42e3-9602-64c9cbe7f235" containerName="registry-server" containerID="cri-o://815d6c4f81f95befe3e0b31b2244cfa71998a12724b3ee31d22e4dbfac7a0e72" gracePeriod=2 Oct 11 03:58:45 crc kubenswrapper[4798]: I1011 03:58:45.196530 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l8bxj" Oct 11 03:58:46 crc kubenswrapper[4798]: I1011 03:58:46.614543 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:58:46 crc kubenswrapper[4798]: I1011 03:58:46.615079 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:58:46 crc kubenswrapper[4798]: I1011 03:58:46.666911 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:58:47 crc kubenswrapper[4798]: I1011 03:58:47.061890 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:58:47 crc kubenswrapper[4798]: I1011 03:58:47.164406 4798 generic.go:334] "Generic (PLEG): container finished" podID="07406786-26c9-42e3-9602-64c9cbe7f235" containerID="815d6c4f81f95befe3e0b31b2244cfa71998a12724b3ee31d22e4dbfac7a0e72" exitCode=0 Oct 11 03:58:47 crc kubenswrapper[4798]: I1011 03:58:47.165428 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x4ngf" event={"ID":"07406786-26c9-42e3-9602-64c9cbe7f235","Type":"ContainerDied","Data":"815d6c4f81f95befe3e0b31b2244cfa71998a12724b3ee31d22e4dbfac7a0e72"} Oct 11 03:58:47 crc kubenswrapper[4798]: I1011 03:58:47.218115 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:58:47 crc kubenswrapper[4798]: I1011 03:58:47.723775 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mlj2w" Oct 11 03:58:47 crc kubenswrapper[4798]: I1011 03:58:47.794176 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mlj2w" Oct 11 
03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.020079 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.107542 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xxb82" Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.122775 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07406786-26c9-42e3-9602-64c9cbe7f235-catalog-content\") pod \"07406786-26c9-42e3-9602-64c9cbe7f235\" (UID: \"07406786-26c9-42e3-9602-64c9cbe7f235\") " Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.122911 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07406786-26c9-42e3-9602-64c9cbe7f235-utilities\") pod \"07406786-26c9-42e3-9602-64c9cbe7f235\" (UID: \"07406786-26c9-42e3-9602-64c9cbe7f235\") " Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.122949 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72rr6\" (UniqueName: \"kubernetes.io/projected/07406786-26c9-42e3-9602-64c9cbe7f235-kube-api-access-72rr6\") pod \"07406786-26c9-42e3-9602-64c9cbe7f235\" (UID: \"07406786-26c9-42e3-9602-64c9cbe7f235\") " Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.124153 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07406786-26c9-42e3-9602-64c9cbe7f235-utilities" (OuterVolumeSpecName: "utilities") pod "07406786-26c9-42e3-9602-64c9cbe7f235" (UID: "07406786-26c9-42e3-9602-64c9cbe7f235"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.145654 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07406786-26c9-42e3-9602-64c9cbe7f235-kube-api-access-72rr6" (OuterVolumeSpecName: "kube-api-access-72rr6") pod "07406786-26c9-42e3-9602-64c9cbe7f235" (UID: "07406786-26c9-42e3-9602-64c9cbe7f235"). InnerVolumeSpecName "kube-api-access-72rr6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.171030 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xxb82" Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.186568 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x4ngf" event={"ID":"07406786-26c9-42e3-9602-64c9cbe7f235","Type":"ContainerDied","Data":"4a897bb8be4c0e300a0f9ad9de25f02ada482b1c364f314bc868d30d11f74d10"} Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.186654 4798 scope.go:117] "RemoveContainer" containerID="815d6c4f81f95befe3e0b31b2244cfa71998a12724b3ee31d22e4dbfac7a0e72" Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.187022 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x4ngf" Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.208463 4798 scope.go:117] "RemoveContainer" containerID="8109bfa453061ec223baf6735b13be6be983c67bb8494929e7bdb5b83cda8867" Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.223030 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07406786-26c9-42e3-9602-64c9cbe7f235-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "07406786-26c9-42e3-9602-64c9cbe7f235" (UID: "07406786-26c9-42e3-9602-64c9cbe7f235"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.224317 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07406786-26c9-42e3-9602-64c9cbe7f235-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.224806 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07406786-26c9-42e3-9602-64c9cbe7f235-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.224933 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72rr6\" (UniqueName: \"kubernetes.io/projected/07406786-26c9-42e3-9602-64c9cbe7f235-kube-api-access-72rr6\") on node \"crc\" DevicePath \"\"" Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.228545 4798 scope.go:117] "RemoveContainer" containerID="0b02982d8ced9097ed73ed023860a43bbe8f3f1df65a492b54b65ed3a5675a49" Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.517251 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x4ngf"] Oct 11 03:58:48 crc kubenswrapper[4798]: I1011 03:58:48.521538 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-x4ngf"] Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.019832 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wz94f"] Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.020126 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wz94f" podUID="a45b7815-8c30-409d-96d9-c387b09c7d6b" containerName="registry-server" containerID="cri-o://8ec5049297124f0bcb28669c65f3892430a1f9e436575736523aede24854d8e3" gracePeriod=2 Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.205553 4798 generic.go:334] "Generic (PLEG): container finished" podID="a45b7815-8c30-409d-96d9-c387b09c7d6b" containerID="8ec5049297124f0bcb28669c65f3892430a1f9e436575736523aede24854d8e3" exitCode=0 Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.205658 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wz94f" event={"ID":"a45b7815-8c30-409d-96d9-c387b09c7d6b","Type":"ContainerDied","Data":"8ec5049297124f0bcb28669c65f3892430a1f9e436575736523aede24854d8e3"} Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.430872 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07406786-26c9-42e3-9602-64c9cbe7f235" path="/var/lib/kubelet/pods/07406786-26c9-42e3-9602-64c9cbe7f235/volumes" Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.445524 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.542352 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45b7815-8c30-409d-96d9-c387b09c7d6b-utilities\") pod \"a45b7815-8c30-409d-96d9-c387b09c7d6b\" (UID: \"a45b7815-8c30-409d-96d9-c387b09c7d6b\") " Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.542469 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9455d\" (UniqueName: \"kubernetes.io/projected/a45b7815-8c30-409d-96d9-c387b09c7d6b-kube-api-access-9455d\") pod \"a45b7815-8c30-409d-96d9-c387b09c7d6b\" (UID: \"a45b7815-8c30-409d-96d9-c387b09c7d6b\") " Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.542504 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45b7815-8c30-409d-96d9-c387b09c7d6b-catalog-content\") pod \"a45b7815-8c30-409d-96d9-c387b09c7d6b\" (UID: \"a45b7815-8c30-409d-96d9-c387b09c7d6b\") " Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.544133 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a45b7815-8c30-409d-96d9-c387b09c7d6b-utilities" (OuterVolumeSpecName: "utilities") pod "a45b7815-8c30-409d-96d9-c387b09c7d6b" (UID: "a45b7815-8c30-409d-96d9-c387b09c7d6b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.546507 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45b7815-8c30-409d-96d9-c387b09c7d6b-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.550232 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a45b7815-8c30-409d-96d9-c387b09c7d6b-kube-api-access-9455d" (OuterVolumeSpecName: "kube-api-access-9455d") pod "a45b7815-8c30-409d-96d9-c387b09c7d6b" (UID: "a45b7815-8c30-409d-96d9-c387b09c7d6b"). InnerVolumeSpecName "kube-api-access-9455d". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.556461 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a45b7815-8c30-409d-96d9-c387b09c7d6b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a45b7815-8c30-409d-96d9-c387b09c7d6b" (UID: "a45b7815-8c30-409d-96d9-c387b09c7d6b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.647948 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45b7815-8c30-409d-96d9-c387b09c7d6b-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 03:58:49 crc kubenswrapper[4798]: I1011 03:58:49.648010 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9455d\" (UniqueName: \"kubernetes.io/projected/a45b7815-8c30-409d-96d9-c387b09c7d6b-kube-api-access-9455d\") on node \"crc\" DevicePath \"\"" Oct 11 03:58:50 crc kubenswrapper[4798]: I1011 03:58:50.221006 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wz94f" event={"ID":"a45b7815-8c30-409d-96d9-c387b09c7d6b","Type":"ContainerDied","Data":"d76e773b6f6762bc348c8e30432f2bf0c14be2dfbcef1c368cb69998da469d3e"} Oct 11 03:58:50 crc kubenswrapper[4798]: I1011 03:58:50.221080 4798 scope.go:117] "RemoveContainer" containerID="8ec5049297124f0bcb28669c65f3892430a1f9e436575736523aede24854d8e3" Oct 11 03:58:50 crc kubenswrapper[4798]: I1011 03:58:50.221083 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wz94f" Oct 11 03:58:50 crc kubenswrapper[4798]: I1011 03:58:50.237566 4798 scope.go:117] "RemoveContainer" containerID="fd16003dfc4324eb16c16e3408df5eff42c9fe5c786b4dd631f0d1ea53944161" Oct 11 03:58:50 crc kubenswrapper[4798]: I1011 03:58:50.252838 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wz94f"] Oct 11 03:58:50 crc kubenswrapper[4798]: I1011 03:58:50.258212 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wz94f"] Oct 11 03:58:50 crc kubenswrapper[4798]: I1011 03:58:50.267090 4798 scope.go:117] "RemoveContainer" containerID="ae8942f7ac5926ae9ddb20bf0a4e71c9f13cd6a90b97aaa54b4ff1d3f781ec2c" Oct 11 03:58:51 crc kubenswrapper[4798]: I1011 03:58:51.431884 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a45b7815-8c30-409d-96d9-c387b09c7d6b" path="/var/lib/kubelet/pods/a45b7815-8c30-409d-96d9-c387b09c7d6b/volumes" Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.019436 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xxb82"] Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.019762 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xxb82" podUID="036b2b20-470d-4ea9-a0ef-10927444dfda" containerName="registry-server" containerID="cri-o://30e8039c1efde14c73bdc2abff8774461aef57211dfca9a6ca4ee08eaf419a7c" gracePeriod=2 Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.242967 4798 generic.go:334] "Generic (PLEG): container finished" podID="036b2b20-470d-4ea9-a0ef-10927444dfda" containerID="30e8039c1efde14c73bdc2abff8774461aef57211dfca9a6ca4ee08eaf419a7c" exitCode=0 Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.243035 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xxb82" event={"ID":"036b2b20-470d-4ea9-a0ef-10927444dfda","Type":"ContainerDied","Data":"30e8039c1efde14c73bdc2abff8774461aef57211dfca9a6ca4ee08eaf419a7c"} Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.404158 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xxb82" Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.487063 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhjws\" (UniqueName: \"kubernetes.io/projected/036b2b20-470d-4ea9-a0ef-10927444dfda-kube-api-access-rhjws\") pod \"036b2b20-470d-4ea9-a0ef-10927444dfda\" (UID: \"036b2b20-470d-4ea9-a0ef-10927444dfda\") " Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.487155 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/036b2b20-470d-4ea9-a0ef-10927444dfda-catalog-content\") pod \"036b2b20-470d-4ea9-a0ef-10927444dfda\" (UID: \"036b2b20-470d-4ea9-a0ef-10927444dfda\") " Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.487299 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/036b2b20-470d-4ea9-a0ef-10927444dfda-utilities\") pod \"036b2b20-470d-4ea9-a0ef-10927444dfda\" (UID: \"036b2b20-470d-4ea9-a0ef-10927444dfda\") " Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.488113 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/036b2b20-470d-4ea9-a0ef-10927444dfda-utilities" (OuterVolumeSpecName: "utilities") pod "036b2b20-470d-4ea9-a0ef-10927444dfda" (UID: "036b2b20-470d-4ea9-a0ef-10927444dfda"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.488354 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/036b2b20-470d-4ea9-a0ef-10927444dfda-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.505167 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/036b2b20-470d-4ea9-a0ef-10927444dfda-kube-api-access-rhjws" (OuterVolumeSpecName: "kube-api-access-rhjws") pod "036b2b20-470d-4ea9-a0ef-10927444dfda" (UID: "036b2b20-470d-4ea9-a0ef-10927444dfda"). InnerVolumeSpecName "kube-api-access-rhjws". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.589744 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhjws\" (UniqueName: \"kubernetes.io/projected/036b2b20-470d-4ea9-a0ef-10927444dfda-kube-api-access-rhjws\") on node \"crc\" DevicePath \"\"" Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.592521 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/036b2b20-470d-4ea9-a0ef-10927444dfda-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "036b2b20-470d-4ea9-a0ef-10927444dfda" (UID: "036b2b20-470d-4ea9-a0ef-10927444dfda"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:58:52 crc kubenswrapper[4798]: I1011 03:58:52.691349 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/036b2b20-470d-4ea9-a0ef-10927444dfda-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 03:58:53 crc kubenswrapper[4798]: I1011 03:58:53.253421 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xxb82" event={"ID":"036b2b20-470d-4ea9-a0ef-10927444dfda","Type":"ContainerDied","Data":"3898cb6361e16848ff6d6c5ea82700d86ec0d560943c789d5b4905f826f260f7"} Oct 11 03:58:53 crc kubenswrapper[4798]: I1011 03:58:53.253509 4798 scope.go:117] "RemoveContainer" containerID="30e8039c1efde14c73bdc2abff8774461aef57211dfca9a6ca4ee08eaf419a7c" Oct 11 03:58:53 crc kubenswrapper[4798]: I1011 03:58:53.253517 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xxb82" Oct 11 03:58:53 crc kubenswrapper[4798]: I1011 03:58:53.272003 4798 scope.go:117] "RemoveContainer" containerID="f532b1c4db33ca16a839c5a880b7fe8a634125664860fa2f4c0fb7f6c329eeb2" Oct 11 03:58:53 crc kubenswrapper[4798]: I1011 03:58:53.299840 4798 scope.go:117] "RemoveContainer" containerID="c64cb309e2d5d960144a816f02e5007ca9285307de0602169b39b7db7cf9a0ab" Oct 11 03:58:53 crc kubenswrapper[4798]: I1011 03:58:53.303417 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xxb82"] Oct 11 03:58:53 crc kubenswrapper[4798]: I1011 03:58:53.307010 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xxb82"] Oct 11 03:58:53 crc kubenswrapper[4798]: I1011 03:58:53.434266 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="036b2b20-470d-4ea9-a0ef-10927444dfda" path="/var/lib/kubelet/pods/036b2b20-470d-4ea9-a0ef-10927444dfda/volumes" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.858889 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-65656"] Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860095 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23f9fc02-cc93-408a-89a7-3698f24744dd" containerName="pruner" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860111 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="23f9fc02-cc93-408a-89a7-3698f24744dd" containerName="pruner" Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860131 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="036b2b20-470d-4ea9-a0ef-10927444dfda" containerName="registry-server" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860139 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="036b2b20-470d-4ea9-a0ef-10927444dfda" containerName="registry-server" Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860156 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07406786-26c9-42e3-9602-64c9cbe7f235" containerName="extract-utilities" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860164 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="07406786-26c9-42e3-9602-64c9cbe7f235" containerName="extract-utilities" Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860177 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2528278-dc60-43fd-8f3c-92182689a8d8" containerName="pruner" Oct 11 03:59:04 crc 
kubenswrapper[4798]: I1011 03:59:04.860185 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2528278-dc60-43fd-8f3c-92182689a8d8" containerName="pruner" Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860202 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="036b2b20-470d-4ea9-a0ef-10927444dfda" containerName="extract-content" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860209 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="036b2b20-470d-4ea9-a0ef-10927444dfda" containerName="extract-content" Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860225 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b37ceeac-8a39-4c11-8e4a-51047efe0da4" containerName="extract-utilities" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860233 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="b37ceeac-8a39-4c11-8e4a-51047efe0da4" containerName="extract-utilities" Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860246 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b37ceeac-8a39-4c11-8e4a-51047efe0da4" containerName="extract-content" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860253 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="b37ceeac-8a39-4c11-8e4a-51047efe0da4" containerName="extract-content" Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860265 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07406786-26c9-42e3-9602-64c9cbe7f235" containerName="registry-server" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860272 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="07406786-26c9-42e3-9602-64c9cbe7f235" containerName="registry-server" Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860280 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a45b7815-8c30-409d-96d9-c387b09c7d6b" containerName="extract-content" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860288 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="a45b7815-8c30-409d-96d9-c387b09c7d6b" containerName="extract-content" Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860304 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="036b2b20-470d-4ea9-a0ef-10927444dfda" containerName="extract-utilities" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860311 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="036b2b20-470d-4ea9-a0ef-10927444dfda" containerName="extract-utilities" Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860320 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a45b7815-8c30-409d-96d9-c387b09c7d6b" containerName="registry-server" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860326 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="a45b7815-8c30-409d-96d9-c387b09c7d6b" containerName="registry-server" Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860344 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a45b7815-8c30-409d-96d9-c387b09c7d6b" containerName="extract-utilities" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860351 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="a45b7815-8c30-409d-96d9-c387b09c7d6b" containerName="extract-utilities" Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860360 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07406786-26c9-42e3-9602-64c9cbe7f235" 
containerName="extract-content" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860367 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="07406786-26c9-42e3-9602-64c9cbe7f235" containerName="extract-content" Oct 11 03:59:04 crc kubenswrapper[4798]: E1011 03:59:04.860382 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b37ceeac-8a39-4c11-8e4a-51047efe0da4" containerName="registry-server" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860410 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="b37ceeac-8a39-4c11-8e4a-51047efe0da4" containerName="registry-server" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860647 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="23f9fc02-cc93-408a-89a7-3698f24744dd" containerName="pruner" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860670 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="036b2b20-470d-4ea9-a0ef-10927444dfda" containerName="registry-server" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860692 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2528278-dc60-43fd-8f3c-92182689a8d8" containerName="pruner" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860704 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="b37ceeac-8a39-4c11-8e4a-51047efe0da4" containerName="registry-server" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860734 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="a45b7815-8c30-409d-96d9-c387b09c7d6b" containerName="registry-server" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.860751 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="07406786-26c9-42e3-9602-64c9cbe7f235" containerName="registry-server" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.861407 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.881466 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-65656"] Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.978123 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/bbc3e90f-d267-4a2b-9c48-a718af327d54-installation-pull-secrets\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.978199 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/bbc3e90f-d267-4a2b-9c48-a718af327d54-registry-tls\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.978223 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j67qb\" (UniqueName: \"kubernetes.io/projected/bbc3e90f-d267-4a2b-9c48-a718af327d54-kube-api-access-j67qb\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.978277 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/bbc3e90f-d267-4a2b-9c48-a718af327d54-ca-trust-extracted\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.978324 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bbc3e90f-d267-4a2b-9c48-a718af327d54-trusted-ca\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.978366 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.978417 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/bbc3e90f-d267-4a2b-9c48-a718af327d54-registry-certificates\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:04 crc kubenswrapper[4798]: I1011 03:59:04.978447 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/bbc3e90f-d267-4a2b-9c48-a718af327d54-bound-sa-token\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.002784 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.079378 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bbc3e90f-d267-4a2b-9c48-a718af327d54-trusted-ca\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.079968 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/bbc3e90f-d267-4a2b-9c48-a718af327d54-registry-certificates\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.079999 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bbc3e90f-d267-4a2b-9c48-a718af327d54-bound-sa-token\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.080028 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/bbc3e90f-d267-4a2b-9c48-a718af327d54-installation-pull-secrets\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.080080 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/bbc3e90f-d267-4a2b-9c48-a718af327d54-registry-tls\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.080103 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j67qb\" (UniqueName: \"kubernetes.io/projected/bbc3e90f-d267-4a2b-9c48-a718af327d54-kube-api-access-j67qb\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.080136 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/bbc3e90f-d267-4a2b-9c48-a718af327d54-ca-trust-extracted\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.080702 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/bbc3e90f-d267-4a2b-9c48-a718af327d54-ca-trust-extracted\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.080946 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bbc3e90f-d267-4a2b-9c48-a718af327d54-trusted-ca\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.081549 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/bbc3e90f-d267-4a2b-9c48-a718af327d54-registry-certificates\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.088287 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/bbc3e90f-d267-4a2b-9c48-a718af327d54-installation-pull-secrets\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.088358 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/bbc3e90f-d267-4a2b-9c48-a718af327d54-registry-tls\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.099362 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bbc3e90f-d267-4a2b-9c48-a718af327d54-bound-sa-token\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.099781 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j67qb\" (UniqueName: \"kubernetes.io/projected/bbc3e90f-d267-4a2b-9c48-a718af327d54-kube-api-access-j67qb\") pod \"image-registry-66df7c8f76-65656\" (UID: \"bbc3e90f-d267-4a2b-9c48-a718af327d54\") " pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.191872 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:05 crc kubenswrapper[4798]: I1011 03:59:05.390149 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-65656"] Oct 11 03:59:05 crc kubenswrapper[4798]: W1011 03:59:05.397892 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbbc3e90f_d267_4a2b_9c48_a718af327d54.slice/crio-858e697ecb4ca43433b782924648c94b06e4be56e945af1309e38127622c2d53 WatchSource:0}: Error finding container 858e697ecb4ca43433b782924648c94b06e4be56e945af1309e38127622c2d53: Status 404 returned error can't find the container with id 858e697ecb4ca43433b782924648c94b06e4be56e945af1309e38127622c2d53 Oct 11 03:59:06 crc kubenswrapper[4798]: I1011 03:59:06.337231 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-65656" event={"ID":"bbc3e90f-d267-4a2b-9c48-a718af327d54","Type":"ContainerStarted","Data":"1eec11e2238be5b800d2d7ebb2777bdbfebbfe23bc57f2e731c2298a5a11db58"} Oct 11 03:59:06 crc kubenswrapper[4798]: I1011 03:59:06.337767 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:06 crc kubenswrapper[4798]: I1011 03:59:06.337786 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-65656" event={"ID":"bbc3e90f-d267-4a2b-9c48-a718af327d54","Type":"ContainerStarted","Data":"858e697ecb4ca43433b782924648c94b06e4be56e945af1309e38127622c2d53"} Oct 11 03:59:06 crc kubenswrapper[4798]: I1011 03:59:06.360276 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-65656" podStartSLOduration=2.360260723 podStartE2EDuration="2.360260723s" podCreationTimestamp="2025-10-11 03:59:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:59:06.358079459 +0000 UTC m=+241.694369145" watchObservedRunningTime="2025-10-11 03:59:06.360260723 +0000 UTC m=+241.696550409" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.371895 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7k4v2"] Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.373612 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7k4v2" podUID="08878c03-1346-41ac-9f4f-874cd48e2129" containerName="registry-server" containerID="cri-o://cc9e901028fc84af065c9375550e0e1d67d2ba4280feb013c3d3cac12ad60489" gracePeriod=30 Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.384091 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l8bxj"] Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.384518 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l8bxj" podUID="5df1000c-7c15-48cd-adcc-0c286e651fad" containerName="registry-server" containerID="cri-o://25402c0fe51dc0118b2de6a1cedb19603b70fe7057bd26b5b115a6c96d3424a0" gracePeriod=30 Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.405718 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-dp4kr"] Oct 11 03:59:10 crc 
kubenswrapper[4798]: I1011 03:59:10.407596 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" podUID="45c5e131-b2e8-4a3f-8c26-0dfe5cacee11" containerName="marketplace-operator" containerID="cri-o://403233cb4d6cc930947779dbde5efdf608eae4e2004a4936d2f878d2794daf7e" gracePeriod=30 Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.409871 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mvv66"] Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.410215 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mvv66" podUID="de740119-5acc-48d0-aaa4-0a64e054cac8" containerName="registry-server" containerID="cri-o://c2b5a728290701acd3977918422f343f12e40c5d6926425a845cce330bd70c57" gracePeriod=30 Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.416208 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rkg2t"] Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.417117 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.423689 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mlj2w"] Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.423975 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mlj2w" podUID="c27e187f-5d34-4455-9a63-8e3f213139bc" containerName="registry-server" containerID="cri-o://a9d117cb51c4ad6aa00b2cd2223a188d200bccdc723ffbcf9600962b2c855743" gracePeriod=30 Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.427542 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rkg2t"] Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.601980 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p48l\" (UniqueName: \"kubernetes.io/projected/0d156e8f-2a95-406a-beb0-c9b7e36f9e8b-kube-api-access-8p48l\") pod \"marketplace-operator-79b997595-rkg2t\" (UID: \"0d156e8f-2a95-406a-beb0-c9b7e36f9e8b\") " pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.602455 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0d156e8f-2a95-406a-beb0-c9b7e36f9e8b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rkg2t\" (UID: \"0d156e8f-2a95-406a-beb0-c9b7e36f9e8b\") " pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.602507 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0d156e8f-2a95-406a-beb0-c9b7e36f9e8b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rkg2t\" (UID: \"0d156e8f-2a95-406a-beb0-c9b7e36f9e8b\") " pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.703685 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" 
(UniqueName: \"kubernetes.io/configmap/0d156e8f-2a95-406a-beb0-c9b7e36f9e8b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rkg2t\" (UID: \"0d156e8f-2a95-406a-beb0-c9b7e36f9e8b\") " pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.703797 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p48l\" (UniqueName: \"kubernetes.io/projected/0d156e8f-2a95-406a-beb0-c9b7e36f9e8b-kube-api-access-8p48l\") pod \"marketplace-operator-79b997595-rkg2t\" (UID: \"0d156e8f-2a95-406a-beb0-c9b7e36f9e8b\") " pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.703829 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0d156e8f-2a95-406a-beb0-c9b7e36f9e8b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rkg2t\" (UID: \"0d156e8f-2a95-406a-beb0-c9b7e36f9e8b\") " pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.712671 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0d156e8f-2a95-406a-beb0-c9b7e36f9e8b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rkg2t\" (UID: \"0d156e8f-2a95-406a-beb0-c9b7e36f9e8b\") " pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.714465 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0d156e8f-2a95-406a-beb0-c9b7e36f9e8b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rkg2t\" (UID: \"0d156e8f-2a95-406a-beb0-c9b7e36f9e8b\") " pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.735062 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8p48l\" (UniqueName: \"kubernetes.io/projected/0d156e8f-2a95-406a-beb0-c9b7e36f9e8b-kube-api-access-8p48l\") pod \"marketplace-operator-79b997595-rkg2t\" (UID: \"0d156e8f-2a95-406a-beb0-c9b7e36f9e8b\") " pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.744849 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.847850 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7k4v2" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.860718 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l8bxj" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.861110 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mlj2w" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.862756 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:59:10 crc kubenswrapper[4798]: I1011 03:59:10.866050 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008462 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g48hs\" (UniqueName: \"kubernetes.io/projected/5df1000c-7c15-48cd-adcc-0c286e651fad-kube-api-access-g48hs\") pod \"5df1000c-7c15-48cd-adcc-0c286e651fad\" (UID: \"5df1000c-7c15-48cd-adcc-0c286e651fad\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008525 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08878c03-1346-41ac-9f4f-874cd48e2129-utilities\") pod \"08878c03-1346-41ac-9f4f-874cd48e2129\" (UID: \"08878c03-1346-41ac-9f4f-874cd48e2129\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008590 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-marketplace-operator-metrics\") pod \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\" (UID: \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008613 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c27e187f-5d34-4455-9a63-8e3f213139bc-utilities\") pod \"c27e187f-5d34-4455-9a63-8e3f213139bc\" (UID: \"c27e187f-5d34-4455-9a63-8e3f213139bc\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008674 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5df1000c-7c15-48cd-adcc-0c286e651fad-catalog-content\") pod \"5df1000c-7c15-48cd-adcc-0c286e651fad\" (UID: \"5df1000c-7c15-48cd-adcc-0c286e651fad\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008690 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de740119-5acc-48d0-aaa4-0a64e054cac8-catalog-content\") pod \"de740119-5acc-48d0-aaa4-0a64e054cac8\" (UID: \"de740119-5acc-48d0-aaa4-0a64e054cac8\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008728 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snwss\" (UniqueName: \"kubernetes.io/projected/08878c03-1346-41ac-9f4f-874cd48e2129-kube-api-access-snwss\") pod \"08878c03-1346-41ac-9f4f-874cd48e2129\" (UID: \"08878c03-1346-41ac-9f4f-874cd48e2129\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008754 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c27e187f-5d34-4455-9a63-8e3f213139bc-catalog-content\") pod \"c27e187f-5d34-4455-9a63-8e3f213139bc\" (UID: \"c27e187f-5d34-4455-9a63-8e3f213139bc\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008802 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08878c03-1346-41ac-9f4f-874cd48e2129-catalog-content\") pod \"08878c03-1346-41ac-9f4f-874cd48e2129\" (UID: \"08878c03-1346-41ac-9f4f-874cd48e2129\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008829 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2g9n\" (UniqueName: 
\"kubernetes.io/projected/c27e187f-5d34-4455-9a63-8e3f213139bc-kube-api-access-c2g9n\") pod \"c27e187f-5d34-4455-9a63-8e3f213139bc\" (UID: \"c27e187f-5d34-4455-9a63-8e3f213139bc\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008873 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5df1000c-7c15-48cd-adcc-0c286e651fad-utilities\") pod \"5df1000c-7c15-48cd-adcc-0c286e651fad\" (UID: \"5df1000c-7c15-48cd-adcc-0c286e651fad\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008898 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de740119-5acc-48d0-aaa4-0a64e054cac8-utilities\") pod \"de740119-5acc-48d0-aaa4-0a64e054cac8\" (UID: \"de740119-5acc-48d0-aaa4-0a64e054cac8\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008918 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfnw9\" (UniqueName: \"kubernetes.io/projected/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-kube-api-access-xfnw9\") pod \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\" (UID: \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.008992 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-marketplace-trusted-ca\") pod \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\" (UID: \"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.009031 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrgv5\" (UniqueName: \"kubernetes.io/projected/de740119-5acc-48d0-aaa4-0a64e054cac8-kube-api-access-nrgv5\") pod \"de740119-5acc-48d0-aaa4-0a64e054cac8\" (UID: \"de740119-5acc-48d0-aaa4-0a64e054cac8\") " Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.011324 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c27e187f-5d34-4455-9a63-8e3f213139bc-utilities" (OuterVolumeSpecName: "utilities") pod "c27e187f-5d34-4455-9a63-8e3f213139bc" (UID: "c27e187f-5d34-4455-9a63-8e3f213139bc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.012025 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "45c5e131-b2e8-4a3f-8c26-0dfe5cacee11" (UID: "45c5e131-b2e8-4a3f-8c26-0dfe5cacee11"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.012620 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5df1000c-7c15-48cd-adcc-0c286e651fad-utilities" (OuterVolumeSpecName: "utilities") pod "5df1000c-7c15-48cd-adcc-0c286e651fad" (UID: "5df1000c-7c15-48cd-adcc-0c286e651fad"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.012955 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08878c03-1346-41ac-9f4f-874cd48e2129-utilities" (OuterVolumeSpecName: "utilities") pod "08878c03-1346-41ac-9f4f-874cd48e2129" (UID: "08878c03-1346-41ac-9f4f-874cd48e2129"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.013046 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de740119-5acc-48d0-aaa4-0a64e054cac8-utilities" (OuterVolumeSpecName: "utilities") pod "de740119-5acc-48d0-aaa4-0a64e054cac8" (UID: "de740119-5acc-48d0-aaa4-0a64e054cac8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.016962 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c27e187f-5d34-4455-9a63-8e3f213139bc-kube-api-access-c2g9n" (OuterVolumeSpecName: "kube-api-access-c2g9n") pod "c27e187f-5d34-4455-9a63-8e3f213139bc" (UID: "c27e187f-5d34-4455-9a63-8e3f213139bc"). InnerVolumeSpecName "kube-api-access-c2g9n". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.018660 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08878c03-1346-41ac-9f4f-874cd48e2129-kube-api-access-snwss" (OuterVolumeSpecName: "kube-api-access-snwss") pod "08878c03-1346-41ac-9f4f-874cd48e2129" (UID: "08878c03-1346-41ac-9f4f-874cd48e2129"). InnerVolumeSpecName "kube-api-access-snwss". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.018660 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "45c5e131-b2e8-4a3f-8c26-0dfe5cacee11" (UID: "45c5e131-b2e8-4a3f-8c26-0dfe5cacee11"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.049958 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de740119-5acc-48d0-aaa4-0a64e054cac8-kube-api-access-nrgv5" (OuterVolumeSpecName: "kube-api-access-nrgv5") pod "de740119-5acc-48d0-aaa4-0a64e054cac8" (UID: "de740119-5acc-48d0-aaa4-0a64e054cac8"). InnerVolumeSpecName "kube-api-access-nrgv5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.050817 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-kube-api-access-xfnw9" (OuterVolumeSpecName: "kube-api-access-xfnw9") pod "45c5e131-b2e8-4a3f-8c26-0dfe5cacee11" (UID: "45c5e131-b2e8-4a3f-8c26-0dfe5cacee11"). InnerVolumeSpecName "kube-api-access-xfnw9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.051212 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de740119-5acc-48d0-aaa4-0a64e054cac8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "de740119-5acc-48d0-aaa4-0a64e054cac8" (UID: "de740119-5acc-48d0-aaa4-0a64e054cac8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.051940 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5df1000c-7c15-48cd-adcc-0c286e651fad-kube-api-access-g48hs" (OuterVolumeSpecName: "kube-api-access-g48hs") pod "5df1000c-7c15-48cd-adcc-0c286e651fad" (UID: "5df1000c-7c15-48cd-adcc-0c286e651fad"). InnerVolumeSpecName "kube-api-access-g48hs". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.067242 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rkg2t"] Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.092139 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5df1000c-7c15-48cd-adcc-0c286e651fad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5df1000c-7c15-48cd-adcc-0c286e651fad" (UID: "5df1000c-7c15-48cd-adcc-0c286e651fad"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.110868 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08878c03-1346-41ac-9f4f-874cd48e2129-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.111129 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g48hs\" (UniqueName: \"kubernetes.io/projected/5df1000c-7c15-48cd-adcc-0c286e651fad-kube-api-access-g48hs\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.111203 4798 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.111260 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c27e187f-5d34-4455-9a63-8e3f213139bc-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.111320 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5df1000c-7c15-48cd-adcc-0c286e651fad-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.111378 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de740119-5acc-48d0-aaa4-0a64e054cac8-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.111454 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snwss\" (UniqueName: \"kubernetes.io/projected/08878c03-1346-41ac-9f4f-874cd48e2129-kube-api-access-snwss\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 
03:59:11.111528 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2g9n\" (UniqueName: \"kubernetes.io/projected/c27e187f-5d34-4455-9a63-8e3f213139bc-kube-api-access-c2g9n\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.111583 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5df1000c-7c15-48cd-adcc-0c286e651fad-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.111636 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de740119-5acc-48d0-aaa4-0a64e054cac8-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.111690 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfnw9\" (UniqueName: \"kubernetes.io/projected/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-kube-api-access-xfnw9\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.111792 4798 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.111856 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrgv5\" (UniqueName: \"kubernetes.io/projected/de740119-5acc-48d0-aaa4-0a64e054cac8-kube-api-access-nrgv5\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.111720 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08878c03-1346-41ac-9f4f-874cd48e2129-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "08878c03-1346-41ac-9f4f-874cd48e2129" (UID: "08878c03-1346-41ac-9f4f-874cd48e2129"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.130281 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c27e187f-5d34-4455-9a63-8e3f213139bc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c27e187f-5d34-4455-9a63-8e3f213139bc" (UID: "c27e187f-5d34-4455-9a63-8e3f213139bc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.213733 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c27e187f-5d34-4455-9a63-8e3f213139bc-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.213783 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08878c03-1346-41ac-9f4f-874cd48e2129-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.380888 4798 generic.go:334] "Generic (PLEG): container finished" podID="45c5e131-b2e8-4a3f-8c26-0dfe5cacee11" containerID="403233cb4d6cc930947779dbde5efdf608eae4e2004a4936d2f878d2794daf7e" exitCode=0 Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.381046 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.381072 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" event={"ID":"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11","Type":"ContainerDied","Data":"403233cb4d6cc930947779dbde5efdf608eae4e2004a4936d2f878d2794daf7e"} Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.381186 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-dp4kr" event={"ID":"45c5e131-b2e8-4a3f-8c26-0dfe5cacee11","Type":"ContainerDied","Data":"add44596eb5fd6eabe1604330e45e7b9ffff5d3e08166f6b9d2e68b53ad16db5"} Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.381240 4798 scope.go:117] "RemoveContainer" containerID="403233cb4d6cc930947779dbde5efdf608eae4e2004a4936d2f878d2794daf7e" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.384523 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mvv66" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.384507 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvv66" event={"ID":"de740119-5acc-48d0-aaa4-0a64e054cac8","Type":"ContainerDied","Data":"c2b5a728290701acd3977918422f343f12e40c5d6926425a845cce330bd70c57"} Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.384379 4798 generic.go:334] "Generic (PLEG): container finished" podID="de740119-5acc-48d0-aaa4-0a64e054cac8" containerID="c2b5a728290701acd3977918422f343f12e40c5d6926425a845cce330bd70c57" exitCode=0 Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.389621 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvv66" event={"ID":"de740119-5acc-48d0-aaa4-0a64e054cac8","Type":"ContainerDied","Data":"924ddd51156a9217e5427ac3f67f2e247a4f862fd6a55ad5cf120235db2e1d20"} Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.393265 4798 generic.go:334] "Generic (PLEG): container finished" podID="08878c03-1346-41ac-9f4f-874cd48e2129" containerID="cc9e901028fc84af065c9375550e0e1d67d2ba4280feb013c3d3cac12ad60489" exitCode=0 Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.393350 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7k4v2" event={"ID":"08878c03-1346-41ac-9f4f-874cd48e2129","Type":"ContainerDied","Data":"cc9e901028fc84af065c9375550e0e1d67d2ba4280feb013c3d3cac12ad60489"} Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.393380 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7k4v2" event={"ID":"08878c03-1346-41ac-9f4f-874cd48e2129","Type":"ContainerDied","Data":"92d6f1d2e21085802dce08dcf8f98a1f1dad8105b6c02fce152f1da1bf4ba86a"} Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.393496 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7k4v2" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.398428 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" event={"ID":"0d156e8f-2a95-406a-beb0-c9b7e36f9e8b","Type":"ContainerStarted","Data":"bead6a2ada99da71a8d9c8fbad5e1348f0ccbdd0819d7261e487d8e89f653610"} Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.398479 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.398496 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" event={"ID":"0d156e8f-2a95-406a-beb0-c9b7e36f9e8b","Type":"ContainerStarted","Data":"15f2ada769344ebe7aae35cf551af7855a3eb9a26a68bd4dd2ff302481d13a6a"} Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.403462 4798 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-rkg2t container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" start-of-body= Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.403514 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" podUID="0d156e8f-2a95-406a-beb0-c9b7e36f9e8b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.404792 4798 scope.go:117] "RemoveContainer" containerID="403233cb4d6cc930947779dbde5efdf608eae4e2004a4936d2f878d2794daf7e" Oct 11 03:59:11 crc kubenswrapper[4798]: E1011 03:59:11.405416 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"403233cb4d6cc930947779dbde5efdf608eae4e2004a4936d2f878d2794daf7e\": container with ID starting with 403233cb4d6cc930947779dbde5efdf608eae4e2004a4936d2f878d2794daf7e not found: ID does not exist" containerID="403233cb4d6cc930947779dbde5efdf608eae4e2004a4936d2f878d2794daf7e" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.405450 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"403233cb4d6cc930947779dbde5efdf608eae4e2004a4936d2f878d2794daf7e"} err="failed to get container status \"403233cb4d6cc930947779dbde5efdf608eae4e2004a4936d2f878d2794daf7e\": rpc error: code = NotFound desc = could not find container \"403233cb4d6cc930947779dbde5efdf608eae4e2004a4936d2f878d2794daf7e\": container with ID starting with 403233cb4d6cc930947779dbde5efdf608eae4e2004a4936d2f878d2794daf7e not found: ID does not exist" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.405502 4798 scope.go:117] "RemoveContainer" containerID="c2b5a728290701acd3977918422f343f12e40c5d6926425a845cce330bd70c57" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.409445 4798 generic.go:334] "Generic (PLEG): container finished" podID="5df1000c-7c15-48cd-adcc-0c286e651fad" containerID="25402c0fe51dc0118b2de6a1cedb19603b70fe7057bd26b5b115a6c96d3424a0" exitCode=0 Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.409499 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l8bxj" 
event={"ID":"5df1000c-7c15-48cd-adcc-0c286e651fad","Type":"ContainerDied","Data":"25402c0fe51dc0118b2de6a1cedb19603b70fe7057bd26b5b115a6c96d3424a0"} Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.409526 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l8bxj" event={"ID":"5df1000c-7c15-48cd-adcc-0c286e651fad","Type":"ContainerDied","Data":"06ae338d2c522e05ba2fad27617ea4f127717d6fb7991c766dea113c518107d6"} Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.409599 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l8bxj" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.422441 4798 generic.go:334] "Generic (PLEG): container finished" podID="c27e187f-5d34-4455-9a63-8e3f213139bc" containerID="a9d117cb51c4ad6aa00b2cd2223a188d200bccdc723ffbcf9600962b2c855743" exitCode=0 Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.422607 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mlj2w" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.426707 4798 scope.go:117] "RemoveContainer" containerID="3635947bc7598f7b73d8b70fcbb228c5aa862eaa9a1dd8b93e7af0d379d14d99" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.434182 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" podStartSLOduration=1.43416078 podStartE2EDuration="1.43416078s" podCreationTimestamp="2025-10-11 03:59:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:59:11.42088314 +0000 UTC m=+246.757172826" watchObservedRunningTime="2025-10-11 03:59:11.43416078 +0000 UTC m=+246.770450466" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.434477 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mlj2w" event={"ID":"c27e187f-5d34-4455-9a63-8e3f213139bc","Type":"ContainerDied","Data":"a9d117cb51c4ad6aa00b2cd2223a188d200bccdc723ffbcf9600962b2c855743"} Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.434528 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mlj2w" event={"ID":"c27e187f-5d34-4455-9a63-8e3f213139bc","Type":"ContainerDied","Data":"ef435e418ee8aa655df4dd191a6afb07ef992677311560b0f5aeed1f8bcf787e"} Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.442241 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7k4v2"] Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.448018 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7k4v2"] Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.452476 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-dp4kr"] Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.457457 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-dp4kr"] Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.469707 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mvv66"] Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.478967 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-marketplace/redhat-marketplace-mvv66"] Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.479365 4798 scope.go:117] "RemoveContainer" containerID="73a656b9737f12fccc32e8e0238c9d5f5a29a6bb03abf7bc1d249a34631be68a" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.483643 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l8bxj"] Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.494526 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-l8bxj"] Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.498272 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mlj2w"] Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.500641 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mlj2w"] Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.505739 4798 scope.go:117] "RemoveContainer" containerID="c2b5a728290701acd3977918422f343f12e40c5d6926425a845cce330bd70c57" Oct 11 03:59:11 crc kubenswrapper[4798]: E1011 03:59:11.506190 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2b5a728290701acd3977918422f343f12e40c5d6926425a845cce330bd70c57\": container with ID starting with c2b5a728290701acd3977918422f343f12e40c5d6926425a845cce330bd70c57 not found: ID does not exist" containerID="c2b5a728290701acd3977918422f343f12e40c5d6926425a845cce330bd70c57" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.506225 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2b5a728290701acd3977918422f343f12e40c5d6926425a845cce330bd70c57"} err="failed to get container status \"c2b5a728290701acd3977918422f343f12e40c5d6926425a845cce330bd70c57\": rpc error: code = NotFound desc = could not find container \"c2b5a728290701acd3977918422f343f12e40c5d6926425a845cce330bd70c57\": container with ID starting with c2b5a728290701acd3977918422f343f12e40c5d6926425a845cce330bd70c57 not found: ID does not exist" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.506251 4798 scope.go:117] "RemoveContainer" containerID="3635947bc7598f7b73d8b70fcbb228c5aa862eaa9a1dd8b93e7af0d379d14d99" Oct 11 03:59:11 crc kubenswrapper[4798]: E1011 03:59:11.507439 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3635947bc7598f7b73d8b70fcbb228c5aa862eaa9a1dd8b93e7af0d379d14d99\": container with ID starting with 3635947bc7598f7b73d8b70fcbb228c5aa862eaa9a1dd8b93e7af0d379d14d99 not found: ID does not exist" containerID="3635947bc7598f7b73d8b70fcbb228c5aa862eaa9a1dd8b93e7af0d379d14d99" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.507502 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3635947bc7598f7b73d8b70fcbb228c5aa862eaa9a1dd8b93e7af0d379d14d99"} err="failed to get container status \"3635947bc7598f7b73d8b70fcbb228c5aa862eaa9a1dd8b93e7af0d379d14d99\": rpc error: code = NotFound desc = could not find container \"3635947bc7598f7b73d8b70fcbb228c5aa862eaa9a1dd8b93e7af0d379d14d99\": container with ID starting with 3635947bc7598f7b73d8b70fcbb228c5aa862eaa9a1dd8b93e7af0d379d14d99 not found: ID does not exist" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.507539 4798 scope.go:117] "RemoveContainer" 
containerID="73a656b9737f12fccc32e8e0238c9d5f5a29a6bb03abf7bc1d249a34631be68a" Oct 11 03:59:11 crc kubenswrapper[4798]: E1011 03:59:11.508073 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73a656b9737f12fccc32e8e0238c9d5f5a29a6bb03abf7bc1d249a34631be68a\": container with ID starting with 73a656b9737f12fccc32e8e0238c9d5f5a29a6bb03abf7bc1d249a34631be68a not found: ID does not exist" containerID="73a656b9737f12fccc32e8e0238c9d5f5a29a6bb03abf7bc1d249a34631be68a" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.508102 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73a656b9737f12fccc32e8e0238c9d5f5a29a6bb03abf7bc1d249a34631be68a"} err="failed to get container status \"73a656b9737f12fccc32e8e0238c9d5f5a29a6bb03abf7bc1d249a34631be68a\": rpc error: code = NotFound desc = could not find container \"73a656b9737f12fccc32e8e0238c9d5f5a29a6bb03abf7bc1d249a34631be68a\": container with ID starting with 73a656b9737f12fccc32e8e0238c9d5f5a29a6bb03abf7bc1d249a34631be68a not found: ID does not exist" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.508121 4798 scope.go:117] "RemoveContainer" containerID="cc9e901028fc84af065c9375550e0e1d67d2ba4280feb013c3d3cac12ad60489" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.550163 4798 scope.go:117] "RemoveContainer" containerID="f5ee9e499c97b5b304f4c8253e94df3aaca4553ae12ed04782c2c3841182f4e5" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.578916 4798 scope.go:117] "RemoveContainer" containerID="2fe7a29c2983a8f378bdc87f3776b44298bd4a548068a9878532e12ae8e3adfc" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.600818 4798 scope.go:117] "RemoveContainer" containerID="cc9e901028fc84af065c9375550e0e1d67d2ba4280feb013c3d3cac12ad60489" Oct 11 03:59:11 crc kubenswrapper[4798]: E1011 03:59:11.601564 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc9e901028fc84af065c9375550e0e1d67d2ba4280feb013c3d3cac12ad60489\": container with ID starting with cc9e901028fc84af065c9375550e0e1d67d2ba4280feb013c3d3cac12ad60489 not found: ID does not exist" containerID="cc9e901028fc84af065c9375550e0e1d67d2ba4280feb013c3d3cac12ad60489" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.601602 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc9e901028fc84af065c9375550e0e1d67d2ba4280feb013c3d3cac12ad60489"} err="failed to get container status \"cc9e901028fc84af065c9375550e0e1d67d2ba4280feb013c3d3cac12ad60489\": rpc error: code = NotFound desc = could not find container \"cc9e901028fc84af065c9375550e0e1d67d2ba4280feb013c3d3cac12ad60489\": container with ID starting with cc9e901028fc84af065c9375550e0e1d67d2ba4280feb013c3d3cac12ad60489 not found: ID does not exist" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.601645 4798 scope.go:117] "RemoveContainer" containerID="f5ee9e499c97b5b304f4c8253e94df3aaca4553ae12ed04782c2c3841182f4e5" Oct 11 03:59:11 crc kubenswrapper[4798]: E1011 03:59:11.602578 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5ee9e499c97b5b304f4c8253e94df3aaca4553ae12ed04782c2c3841182f4e5\": container with ID starting with f5ee9e499c97b5b304f4c8253e94df3aaca4553ae12ed04782c2c3841182f4e5 not found: ID does not exist" containerID="f5ee9e499c97b5b304f4c8253e94df3aaca4553ae12ed04782c2c3841182f4e5" 
Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.602600 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5ee9e499c97b5b304f4c8253e94df3aaca4553ae12ed04782c2c3841182f4e5"} err="failed to get container status \"f5ee9e499c97b5b304f4c8253e94df3aaca4553ae12ed04782c2c3841182f4e5\": rpc error: code = NotFound desc = could not find container \"f5ee9e499c97b5b304f4c8253e94df3aaca4553ae12ed04782c2c3841182f4e5\": container with ID starting with f5ee9e499c97b5b304f4c8253e94df3aaca4553ae12ed04782c2c3841182f4e5 not found: ID does not exist" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.602636 4798 scope.go:117] "RemoveContainer" containerID="2fe7a29c2983a8f378bdc87f3776b44298bd4a548068a9878532e12ae8e3adfc" Oct 11 03:59:11 crc kubenswrapper[4798]: E1011 03:59:11.602867 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fe7a29c2983a8f378bdc87f3776b44298bd4a548068a9878532e12ae8e3adfc\": container with ID starting with 2fe7a29c2983a8f378bdc87f3776b44298bd4a548068a9878532e12ae8e3adfc not found: ID does not exist" containerID="2fe7a29c2983a8f378bdc87f3776b44298bd4a548068a9878532e12ae8e3adfc" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.602888 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fe7a29c2983a8f378bdc87f3776b44298bd4a548068a9878532e12ae8e3adfc"} err="failed to get container status \"2fe7a29c2983a8f378bdc87f3776b44298bd4a548068a9878532e12ae8e3adfc\": rpc error: code = NotFound desc = could not find container \"2fe7a29c2983a8f378bdc87f3776b44298bd4a548068a9878532e12ae8e3adfc\": container with ID starting with 2fe7a29c2983a8f378bdc87f3776b44298bd4a548068a9878532e12ae8e3adfc not found: ID does not exist" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.602904 4798 scope.go:117] "RemoveContainer" containerID="25402c0fe51dc0118b2de6a1cedb19603b70fe7057bd26b5b115a6c96d3424a0" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.620450 4798 scope.go:117] "RemoveContainer" containerID="8c80ff94306640d8cabe32af18507336b3a88695d320bc359824476a4482bc08" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.644221 4798 scope.go:117] "RemoveContainer" containerID="2fd2e0d05737597dd13740074cdf4f1f772374111cc3ba002d863c680066a67f" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.658971 4798 scope.go:117] "RemoveContainer" containerID="25402c0fe51dc0118b2de6a1cedb19603b70fe7057bd26b5b115a6c96d3424a0" Oct 11 03:59:11 crc kubenswrapper[4798]: E1011 03:59:11.659481 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25402c0fe51dc0118b2de6a1cedb19603b70fe7057bd26b5b115a6c96d3424a0\": container with ID starting with 25402c0fe51dc0118b2de6a1cedb19603b70fe7057bd26b5b115a6c96d3424a0 not found: ID does not exist" containerID="25402c0fe51dc0118b2de6a1cedb19603b70fe7057bd26b5b115a6c96d3424a0" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.659541 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25402c0fe51dc0118b2de6a1cedb19603b70fe7057bd26b5b115a6c96d3424a0"} err="failed to get container status \"25402c0fe51dc0118b2de6a1cedb19603b70fe7057bd26b5b115a6c96d3424a0\": rpc error: code = NotFound desc = could not find container \"25402c0fe51dc0118b2de6a1cedb19603b70fe7057bd26b5b115a6c96d3424a0\": container with ID starting with 
25402c0fe51dc0118b2de6a1cedb19603b70fe7057bd26b5b115a6c96d3424a0 not found: ID does not exist" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.659575 4798 scope.go:117] "RemoveContainer" containerID="8c80ff94306640d8cabe32af18507336b3a88695d320bc359824476a4482bc08" Oct 11 03:59:11 crc kubenswrapper[4798]: E1011 03:59:11.659873 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c80ff94306640d8cabe32af18507336b3a88695d320bc359824476a4482bc08\": container with ID starting with 8c80ff94306640d8cabe32af18507336b3a88695d320bc359824476a4482bc08 not found: ID does not exist" containerID="8c80ff94306640d8cabe32af18507336b3a88695d320bc359824476a4482bc08" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.659908 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c80ff94306640d8cabe32af18507336b3a88695d320bc359824476a4482bc08"} err="failed to get container status \"8c80ff94306640d8cabe32af18507336b3a88695d320bc359824476a4482bc08\": rpc error: code = NotFound desc = could not find container \"8c80ff94306640d8cabe32af18507336b3a88695d320bc359824476a4482bc08\": container with ID starting with 8c80ff94306640d8cabe32af18507336b3a88695d320bc359824476a4482bc08 not found: ID does not exist" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.659930 4798 scope.go:117] "RemoveContainer" containerID="2fd2e0d05737597dd13740074cdf4f1f772374111cc3ba002d863c680066a67f" Oct 11 03:59:11 crc kubenswrapper[4798]: E1011 03:59:11.660246 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fd2e0d05737597dd13740074cdf4f1f772374111cc3ba002d863c680066a67f\": container with ID starting with 2fd2e0d05737597dd13740074cdf4f1f772374111cc3ba002d863c680066a67f not found: ID does not exist" containerID="2fd2e0d05737597dd13740074cdf4f1f772374111cc3ba002d863c680066a67f" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.660273 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fd2e0d05737597dd13740074cdf4f1f772374111cc3ba002d863c680066a67f"} err="failed to get container status \"2fd2e0d05737597dd13740074cdf4f1f772374111cc3ba002d863c680066a67f\": rpc error: code = NotFound desc = could not find container \"2fd2e0d05737597dd13740074cdf4f1f772374111cc3ba002d863c680066a67f\": container with ID starting with 2fd2e0d05737597dd13740074cdf4f1f772374111cc3ba002d863c680066a67f not found: ID does not exist" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.660294 4798 scope.go:117] "RemoveContainer" containerID="a9d117cb51c4ad6aa00b2cd2223a188d200bccdc723ffbcf9600962b2c855743" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.673945 4798 scope.go:117] "RemoveContainer" containerID="6d0807f660ce06a144606531ffe289b31590cba612f84f6ffde035e2e3245316" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.701498 4798 scope.go:117] "RemoveContainer" containerID="49aa1adfff2fd4438b35d1760053c7ecb92242a82ae6576cfeb4c84eddf99f09" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.728193 4798 scope.go:117] "RemoveContainer" containerID="a9d117cb51c4ad6aa00b2cd2223a188d200bccdc723ffbcf9600962b2c855743" Oct 11 03:59:11 crc kubenswrapper[4798]: E1011 03:59:11.728957 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9d117cb51c4ad6aa00b2cd2223a188d200bccdc723ffbcf9600962b2c855743\": container 
with ID starting with a9d117cb51c4ad6aa00b2cd2223a188d200bccdc723ffbcf9600962b2c855743 not found: ID does not exist" containerID="a9d117cb51c4ad6aa00b2cd2223a188d200bccdc723ffbcf9600962b2c855743" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.729020 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9d117cb51c4ad6aa00b2cd2223a188d200bccdc723ffbcf9600962b2c855743"} err="failed to get container status \"a9d117cb51c4ad6aa00b2cd2223a188d200bccdc723ffbcf9600962b2c855743\": rpc error: code = NotFound desc = could not find container \"a9d117cb51c4ad6aa00b2cd2223a188d200bccdc723ffbcf9600962b2c855743\": container with ID starting with a9d117cb51c4ad6aa00b2cd2223a188d200bccdc723ffbcf9600962b2c855743 not found: ID does not exist" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.729068 4798 scope.go:117] "RemoveContainer" containerID="6d0807f660ce06a144606531ffe289b31590cba612f84f6ffde035e2e3245316" Oct 11 03:59:11 crc kubenswrapper[4798]: E1011 03:59:11.729806 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d0807f660ce06a144606531ffe289b31590cba612f84f6ffde035e2e3245316\": container with ID starting with 6d0807f660ce06a144606531ffe289b31590cba612f84f6ffde035e2e3245316 not found: ID does not exist" containerID="6d0807f660ce06a144606531ffe289b31590cba612f84f6ffde035e2e3245316" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.729857 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d0807f660ce06a144606531ffe289b31590cba612f84f6ffde035e2e3245316"} err="failed to get container status \"6d0807f660ce06a144606531ffe289b31590cba612f84f6ffde035e2e3245316\": rpc error: code = NotFound desc = could not find container \"6d0807f660ce06a144606531ffe289b31590cba612f84f6ffde035e2e3245316\": container with ID starting with 6d0807f660ce06a144606531ffe289b31590cba612f84f6ffde035e2e3245316 not found: ID does not exist" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.729900 4798 scope.go:117] "RemoveContainer" containerID="49aa1adfff2fd4438b35d1760053c7ecb92242a82ae6576cfeb4c84eddf99f09" Oct 11 03:59:11 crc kubenswrapper[4798]: E1011 03:59:11.730212 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49aa1adfff2fd4438b35d1760053c7ecb92242a82ae6576cfeb4c84eddf99f09\": container with ID starting with 49aa1adfff2fd4438b35d1760053c7ecb92242a82ae6576cfeb4c84eddf99f09 not found: ID does not exist" containerID="49aa1adfff2fd4438b35d1760053c7ecb92242a82ae6576cfeb4c84eddf99f09" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.730247 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49aa1adfff2fd4438b35d1760053c7ecb92242a82ae6576cfeb4c84eddf99f09"} err="failed to get container status \"49aa1adfff2fd4438b35d1760053c7ecb92242a82ae6576cfeb4c84eddf99f09\": rpc error: code = NotFound desc = could not find container \"49aa1adfff2fd4438b35d1760053c7ecb92242a82ae6576cfeb4c84eddf99f09\": container with ID starting with 49aa1adfff2fd4438b35d1760053c7ecb92242a82ae6576cfeb4c84eddf99f09 not found: ID does not exist" Oct 11 03:59:11 crc kubenswrapper[4798]: I1011 03:59:11.745980 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-kw978"] Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.441860 4798 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-rkg2t" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.589288 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-v8lpr"] Oct 11 03:59:12 crc kubenswrapper[4798]: E1011 03:59:12.589557 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5df1000c-7c15-48cd-adcc-0c286e651fad" containerName="extract-content" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.589578 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="5df1000c-7c15-48cd-adcc-0c286e651fad" containerName="extract-content" Oct 11 03:59:12 crc kubenswrapper[4798]: E1011 03:59:12.589594 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de740119-5acc-48d0-aaa4-0a64e054cac8" containerName="extract-content" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.589601 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="de740119-5acc-48d0-aaa4-0a64e054cac8" containerName="extract-content" Oct 11 03:59:12 crc kubenswrapper[4798]: E1011 03:59:12.589610 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c27e187f-5d34-4455-9a63-8e3f213139bc" containerName="registry-server" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.589617 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c27e187f-5d34-4455-9a63-8e3f213139bc" containerName="registry-server" Oct 11 03:59:12 crc kubenswrapper[4798]: E1011 03:59:12.589629 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5df1000c-7c15-48cd-adcc-0c286e651fad" containerName="extract-utilities" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.589690 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="5df1000c-7c15-48cd-adcc-0c286e651fad" containerName="extract-utilities" Oct 11 03:59:12 crc kubenswrapper[4798]: E1011 03:59:12.589700 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08878c03-1346-41ac-9f4f-874cd48e2129" containerName="registry-server" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.589709 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="08878c03-1346-41ac-9f4f-874cd48e2129" containerName="registry-server" Oct 11 03:59:12 crc kubenswrapper[4798]: E1011 03:59:12.590926 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08878c03-1346-41ac-9f4f-874cd48e2129" containerName="extract-utilities" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.590954 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="08878c03-1346-41ac-9f4f-874cd48e2129" containerName="extract-utilities" Oct 11 03:59:12 crc kubenswrapper[4798]: E1011 03:59:12.590968 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c27e187f-5d34-4455-9a63-8e3f213139bc" containerName="extract-utilities" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.590975 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c27e187f-5d34-4455-9a63-8e3f213139bc" containerName="extract-utilities" Oct 11 03:59:12 crc kubenswrapper[4798]: E1011 03:59:12.590986 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5df1000c-7c15-48cd-adcc-0c286e651fad" containerName="registry-server" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.590995 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="5df1000c-7c15-48cd-adcc-0c286e651fad" containerName="registry-server" Oct 11 03:59:12 crc kubenswrapper[4798]: E1011 03:59:12.591009 4798 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="45c5e131-b2e8-4a3f-8c26-0dfe5cacee11" containerName="marketplace-operator" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.591017 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="45c5e131-b2e8-4a3f-8c26-0dfe5cacee11" containerName="marketplace-operator" Oct 11 03:59:12 crc kubenswrapper[4798]: E1011 03:59:12.591025 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c27e187f-5d34-4455-9a63-8e3f213139bc" containerName="extract-content" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.591044 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c27e187f-5d34-4455-9a63-8e3f213139bc" containerName="extract-content" Oct 11 03:59:12 crc kubenswrapper[4798]: E1011 03:59:12.591053 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08878c03-1346-41ac-9f4f-874cd48e2129" containerName="extract-content" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.591060 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="08878c03-1346-41ac-9f4f-874cd48e2129" containerName="extract-content" Oct 11 03:59:12 crc kubenswrapper[4798]: E1011 03:59:12.591073 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de740119-5acc-48d0-aaa4-0a64e054cac8" containerName="registry-server" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.591081 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="de740119-5acc-48d0-aaa4-0a64e054cac8" containerName="registry-server" Oct 11 03:59:12 crc kubenswrapper[4798]: E1011 03:59:12.591093 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de740119-5acc-48d0-aaa4-0a64e054cac8" containerName="extract-utilities" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.591100 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="de740119-5acc-48d0-aaa4-0a64e054cac8" containerName="extract-utilities" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.591231 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="08878c03-1346-41ac-9f4f-874cd48e2129" containerName="registry-server" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.591245 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="5df1000c-7c15-48cd-adcc-0c286e651fad" containerName="registry-server" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.591259 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="45c5e131-b2e8-4a3f-8c26-0dfe5cacee11" containerName="marketplace-operator" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.591271 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="c27e187f-5d34-4455-9a63-8e3f213139bc" containerName="registry-server" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.591281 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="de740119-5acc-48d0-aaa4-0a64e054cac8" containerName="registry-server" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.592165 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.595413 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.612369 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v8lpr"] Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.736040 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lqvf\" (UniqueName: \"kubernetes.io/projected/462b253d-c42e-4640-acdc-defc5c34032f-kube-api-access-9lqvf\") pod \"redhat-marketplace-v8lpr\" (UID: \"462b253d-c42e-4640-acdc-defc5c34032f\") " pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.736105 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/462b253d-c42e-4640-acdc-defc5c34032f-catalog-content\") pod \"redhat-marketplace-v8lpr\" (UID: \"462b253d-c42e-4640-acdc-defc5c34032f\") " pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.736140 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/462b253d-c42e-4640-acdc-defc5c34032f-utilities\") pod \"redhat-marketplace-v8lpr\" (UID: \"462b253d-c42e-4640-acdc-defc5c34032f\") " pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.793914 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-h4bsh"] Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.800103 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h4bsh"] Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.800239 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.802574 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.838651 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lqvf\" (UniqueName: \"kubernetes.io/projected/462b253d-c42e-4640-acdc-defc5c34032f-kube-api-access-9lqvf\") pod \"redhat-marketplace-v8lpr\" (UID: \"462b253d-c42e-4640-acdc-defc5c34032f\") " pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.838717 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/462b253d-c42e-4640-acdc-defc5c34032f-catalog-content\") pod \"redhat-marketplace-v8lpr\" (UID: \"462b253d-c42e-4640-acdc-defc5c34032f\") " pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.838759 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/462b253d-c42e-4640-acdc-defc5c34032f-utilities\") pod \"redhat-marketplace-v8lpr\" (UID: \"462b253d-c42e-4640-acdc-defc5c34032f\") " pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.839250 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/462b253d-c42e-4640-acdc-defc5c34032f-utilities\") pod \"redhat-marketplace-v8lpr\" (UID: \"462b253d-c42e-4640-acdc-defc5c34032f\") " pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.839291 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/462b253d-c42e-4640-acdc-defc5c34032f-catalog-content\") pod \"redhat-marketplace-v8lpr\" (UID: \"462b253d-c42e-4640-acdc-defc5c34032f\") " pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.861671 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lqvf\" (UniqueName: \"kubernetes.io/projected/462b253d-c42e-4640-acdc-defc5c34032f-kube-api-access-9lqvf\") pod \"redhat-marketplace-v8lpr\" (UID: \"462b253d-c42e-4640-acdc-defc5c34032f\") " pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.940772 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5315221c-e6ab-4c3f-9d74-5bf456a5b78e-catalog-content\") pod \"redhat-operators-h4bsh\" (UID: \"5315221c-e6ab-4c3f-9d74-5bf456a5b78e\") " pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.940845 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5315221c-e6ab-4c3f-9d74-5bf456a5b78e-utilities\") pod \"redhat-operators-h4bsh\" (UID: \"5315221c-e6ab-4c3f-9d74-5bf456a5b78e\") " pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.940874 4798 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whdb4\" (UniqueName: \"kubernetes.io/projected/5315221c-e6ab-4c3f-9d74-5bf456a5b78e-kube-api-access-whdb4\") pod \"redhat-operators-h4bsh\" (UID: \"5315221c-e6ab-4c3f-9d74-5bf456a5b78e\") " pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:12 crc kubenswrapper[4798]: I1011 03:59:12.977808 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.042013 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whdb4\" (UniqueName: \"kubernetes.io/projected/5315221c-e6ab-4c3f-9d74-5bf456a5b78e-kube-api-access-whdb4\") pod \"redhat-operators-h4bsh\" (UID: \"5315221c-e6ab-4c3f-9d74-5bf456a5b78e\") " pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.042110 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5315221c-e6ab-4c3f-9d74-5bf456a5b78e-catalog-content\") pod \"redhat-operators-h4bsh\" (UID: \"5315221c-e6ab-4c3f-9d74-5bf456a5b78e\") " pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.042151 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5315221c-e6ab-4c3f-9d74-5bf456a5b78e-utilities\") pod \"redhat-operators-h4bsh\" (UID: \"5315221c-e6ab-4c3f-9d74-5bf456a5b78e\") " pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.042699 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5315221c-e6ab-4c3f-9d74-5bf456a5b78e-utilities\") pod \"redhat-operators-h4bsh\" (UID: \"5315221c-e6ab-4c3f-9d74-5bf456a5b78e\") " pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.043032 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5315221c-e6ab-4c3f-9d74-5bf456a5b78e-catalog-content\") pod \"redhat-operators-h4bsh\" (UID: \"5315221c-e6ab-4c3f-9d74-5bf456a5b78e\") " pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.070618 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whdb4\" (UniqueName: \"kubernetes.io/projected/5315221c-e6ab-4c3f-9d74-5bf456a5b78e-kube-api-access-whdb4\") pod \"redhat-operators-h4bsh\" (UID: \"5315221c-e6ab-4c3f-9d74-5bf456a5b78e\") " pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.116011 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.237369 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v8lpr"] Oct 11 03:59:13 crc kubenswrapper[4798]: W1011 03:59:13.249710 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod462b253d_c42e_4640_acdc_defc5c34032f.slice/crio-37a28ea3fd10dee6235db0c32fa9660b9ed362c5f06b74fe8cbe34cd9f5a835d WatchSource:0}: Error finding container 37a28ea3fd10dee6235db0c32fa9660b9ed362c5f06b74fe8cbe34cd9f5a835d: Status 404 returned error can't find the container with id 37a28ea3fd10dee6235db0c32fa9660b9ed362c5f06b74fe8cbe34cd9f5a835d Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.354820 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h4bsh"] Oct 11 03:59:13 crc kubenswrapper[4798]: W1011 03:59:13.359175 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5315221c_e6ab_4c3f_9d74_5bf456a5b78e.slice/crio-b9030293265d48a82e7897e60288d7d6577464dd81e82b67980a5b991c595253 WatchSource:0}: Error finding container b9030293265d48a82e7897e60288d7d6577464dd81e82b67980a5b991c595253: Status 404 returned error can't find the container with id b9030293265d48a82e7897e60288d7d6577464dd81e82b67980a5b991c595253 Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.433559 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08878c03-1346-41ac-9f4f-874cd48e2129" path="/var/lib/kubelet/pods/08878c03-1346-41ac-9f4f-874cd48e2129/volumes" Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.435164 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45c5e131-b2e8-4a3f-8c26-0dfe5cacee11" path="/var/lib/kubelet/pods/45c5e131-b2e8-4a3f-8c26-0dfe5cacee11/volumes" Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.435774 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5df1000c-7c15-48cd-adcc-0c286e651fad" path="/var/lib/kubelet/pods/5df1000c-7c15-48cd-adcc-0c286e651fad/volumes" Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.437187 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c27e187f-5d34-4455-9a63-8e3f213139bc" path="/var/lib/kubelet/pods/c27e187f-5d34-4455-9a63-8e3f213139bc/volumes" Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.438103 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de740119-5acc-48d0-aaa4-0a64e054cac8" path="/var/lib/kubelet/pods/de740119-5acc-48d0-aaa4-0a64e054cac8/volumes" Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.444630 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4bsh" event={"ID":"5315221c-e6ab-4c3f-9d74-5bf456a5b78e","Type":"ContainerStarted","Data":"b9030293265d48a82e7897e60288d7d6577464dd81e82b67980a5b991c595253"} Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.446448 4798 generic.go:334] "Generic (PLEG): container finished" podID="462b253d-c42e-4640-acdc-defc5c34032f" containerID="5d09c384b9d0705a66b2d6e64c71cdfeaf14bc6ebc36f3a6f09886e818008345" exitCode=0 Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.446552 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v8lpr" 
event={"ID":"462b253d-c42e-4640-acdc-defc5c34032f","Type":"ContainerDied","Data":"5d09c384b9d0705a66b2d6e64c71cdfeaf14bc6ebc36f3a6f09886e818008345"} Oct 11 03:59:13 crc kubenswrapper[4798]: I1011 03:59:13.446621 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v8lpr" event={"ID":"462b253d-c42e-4640-acdc-defc5c34032f","Type":"ContainerStarted","Data":"37a28ea3fd10dee6235db0c32fa9660b9ed362c5f06b74fe8cbe34cd9f5a835d"} Oct 11 03:59:14 crc kubenswrapper[4798]: I1011 03:59:14.452108 4798 generic.go:334] "Generic (PLEG): container finished" podID="5315221c-e6ab-4c3f-9d74-5bf456a5b78e" containerID="b9011aace268dd11720c0c6882509390a141d261c74d7ba571d8844932385810" exitCode=0 Oct 11 03:59:14 crc kubenswrapper[4798]: I1011 03:59:14.452204 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4bsh" event={"ID":"5315221c-e6ab-4c3f-9d74-5bf456a5b78e","Type":"ContainerDied","Data":"b9011aace268dd11720c0c6882509390a141d261c74d7ba571d8844932385810"} Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:14.998262 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pbct9"] Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.001882 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.004276 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pbct9"] Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.004876 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.078476 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c0c7752-53e3-489c-b668-370e654f482a-catalog-content\") pod \"certified-operators-pbct9\" (UID: \"9c0c7752-53e3-489c-b668-370e654f482a\") " pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.078980 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpxpj\" (UniqueName: \"kubernetes.io/projected/9c0c7752-53e3-489c-b668-370e654f482a-kube-api-access-dpxpj\") pod \"certified-operators-pbct9\" (UID: \"9c0c7752-53e3-489c-b668-370e654f482a\") " pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.079152 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c0c7752-53e3-489c-b668-370e654f482a-utilities\") pod \"certified-operators-pbct9\" (UID: \"9c0c7752-53e3-489c-b668-370e654f482a\") " pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.180370 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpxpj\" (UniqueName: \"kubernetes.io/projected/9c0c7752-53e3-489c-b668-370e654f482a-kube-api-access-dpxpj\") pod \"certified-operators-pbct9\" (UID: \"9c0c7752-53e3-489c-b668-370e654f482a\") " pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.180461 4798 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c0c7752-53e3-489c-b668-370e654f482a-utilities\") pod \"certified-operators-pbct9\" (UID: \"9c0c7752-53e3-489c-b668-370e654f482a\") " pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.180507 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c0c7752-53e3-489c-b668-370e654f482a-catalog-content\") pod \"certified-operators-pbct9\" (UID: \"9c0c7752-53e3-489c-b668-370e654f482a\") " pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.180946 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c0c7752-53e3-489c-b668-370e654f482a-catalog-content\") pod \"certified-operators-pbct9\" (UID: \"9c0c7752-53e3-489c-b668-370e654f482a\") " pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.181185 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c0c7752-53e3-489c-b668-370e654f482a-utilities\") pod \"certified-operators-pbct9\" (UID: \"9c0c7752-53e3-489c-b668-370e654f482a\") " pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.201217 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-drk2l"] Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.202511 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.208300 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.212465 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-drk2l"] Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.213619 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpxpj\" (UniqueName: \"kubernetes.io/projected/9c0c7752-53e3-489c-b668-370e654f482a-kube-api-access-dpxpj\") pod \"certified-operators-pbct9\" (UID: \"9c0c7752-53e3-489c-b668-370e654f482a\") " pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.282198 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfc6a2a4-b981-4d28-be73-8d6689b028cd-catalog-content\") pod \"community-operators-drk2l\" (UID: \"dfc6a2a4-b981-4d28-be73-8d6689b028cd\") " pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.282269 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfc6a2a4-b981-4d28-be73-8d6689b028cd-utilities\") pod \"community-operators-drk2l\" (UID: \"dfc6a2a4-b981-4d28-be73-8d6689b028cd\") " pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.282340 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kube-api-access-l5kh7\" (UniqueName: \"kubernetes.io/projected/dfc6a2a4-b981-4d28-be73-8d6689b028cd-kube-api-access-l5kh7\") pod \"community-operators-drk2l\" (UID: \"dfc6a2a4-b981-4d28-be73-8d6689b028cd\") " pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.327267 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.383940 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfc6a2a4-b981-4d28-be73-8d6689b028cd-catalog-content\") pod \"community-operators-drk2l\" (UID: \"dfc6a2a4-b981-4d28-be73-8d6689b028cd\") " pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.384355 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfc6a2a4-b981-4d28-be73-8d6689b028cd-utilities\") pod \"community-operators-drk2l\" (UID: \"dfc6a2a4-b981-4d28-be73-8d6689b028cd\") " pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.384432 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5kh7\" (UniqueName: \"kubernetes.io/projected/dfc6a2a4-b981-4d28-be73-8d6689b028cd-kube-api-access-l5kh7\") pod \"community-operators-drk2l\" (UID: \"dfc6a2a4-b981-4d28-be73-8d6689b028cd\") " pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.384925 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfc6a2a4-b981-4d28-be73-8d6689b028cd-catalog-content\") pod \"community-operators-drk2l\" (UID: \"dfc6a2a4-b981-4d28-be73-8d6689b028cd\") " pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.385187 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfc6a2a4-b981-4d28-be73-8d6689b028cd-utilities\") pod \"community-operators-drk2l\" (UID: \"dfc6a2a4-b981-4d28-be73-8d6689b028cd\") " pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.409050 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5kh7\" (UniqueName: \"kubernetes.io/projected/dfc6a2a4-b981-4d28-be73-8d6689b028cd-kube-api-access-l5kh7\") pod \"community-operators-drk2l\" (UID: \"dfc6a2a4-b981-4d28-be73-8d6689b028cd\") " pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.460130 4798 generic.go:334] "Generic (PLEG): container finished" podID="462b253d-c42e-4640-acdc-defc5c34032f" containerID="df92add01d8bbfa9bcd871ac677b68e6d098746dbc8c7f794da95801f55cacb3" exitCode=0 Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.460204 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v8lpr" event={"ID":"462b253d-c42e-4640-acdc-defc5c34032f","Type":"ContainerDied","Data":"df92add01d8bbfa9bcd871ac677b68e6d098746dbc8c7f794da95801f55cacb3"} Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.547268 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.764691 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-drk2l"] Oct 11 03:59:15 crc kubenswrapper[4798]: I1011 03:59:15.767463 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pbct9"] Oct 11 03:59:16 crc kubenswrapper[4798]: I1011 03:59:16.469378 4798 generic.go:334] "Generic (PLEG): container finished" podID="9c0c7752-53e3-489c-b668-370e654f482a" containerID="7145e3ba8d6ab597f544594931935938d4c4e9d844963382295c7773cf7ab12b" exitCode=0 Oct 11 03:59:16 crc kubenswrapper[4798]: I1011 03:59:16.469466 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pbct9" event={"ID":"9c0c7752-53e3-489c-b668-370e654f482a","Type":"ContainerDied","Data":"7145e3ba8d6ab597f544594931935938d4c4e9d844963382295c7773cf7ab12b"} Oct 11 03:59:16 crc kubenswrapper[4798]: I1011 03:59:16.470160 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pbct9" event={"ID":"9c0c7752-53e3-489c-b668-370e654f482a","Type":"ContainerStarted","Data":"f48008a0673cb37f3cc9e45ae9678c64cec87fe10620150460c6e46c6eed669e"} Oct 11 03:59:16 crc kubenswrapper[4798]: I1011 03:59:16.474084 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v8lpr" event={"ID":"462b253d-c42e-4640-acdc-defc5c34032f","Type":"ContainerStarted","Data":"6c8c8ab9f8d0ad1b46780a3703189d293677de0ae2ac8d3f1014ebeaa5c71a5c"} Oct 11 03:59:16 crc kubenswrapper[4798]: I1011 03:59:16.476917 4798 generic.go:334] "Generic (PLEG): container finished" podID="dfc6a2a4-b981-4d28-be73-8d6689b028cd" containerID="94b60d8c5d0d3211da268620698388b3cad29e1ea36e876047e6649b27df377d" exitCode=0 Oct 11 03:59:16 crc kubenswrapper[4798]: I1011 03:59:16.476982 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drk2l" event={"ID":"dfc6a2a4-b981-4d28-be73-8d6689b028cd","Type":"ContainerDied","Data":"94b60d8c5d0d3211da268620698388b3cad29e1ea36e876047e6649b27df377d"} Oct 11 03:59:16 crc kubenswrapper[4798]: I1011 03:59:16.477002 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drk2l" event={"ID":"dfc6a2a4-b981-4d28-be73-8d6689b028cd","Type":"ContainerStarted","Data":"296877ddcf30b650b95457e89ddc34cdbb2f80eda4dfd659fd44c666875414f9"} Oct 11 03:59:16 crc kubenswrapper[4798]: I1011 03:59:16.481806 4798 generic.go:334] "Generic (PLEG): container finished" podID="5315221c-e6ab-4c3f-9d74-5bf456a5b78e" containerID="3afb6c6783c64301c8581575d240b5b0e26788102b43614827b45bbc227b35ac" exitCode=0 Oct 11 03:59:16 crc kubenswrapper[4798]: I1011 03:59:16.481847 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4bsh" event={"ID":"5315221c-e6ab-4c3f-9d74-5bf456a5b78e","Type":"ContainerDied","Data":"3afb6c6783c64301c8581575d240b5b0e26788102b43614827b45bbc227b35ac"} Oct 11 03:59:16 crc kubenswrapper[4798]: I1011 03:59:16.518726 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-v8lpr" podStartSLOduration=1.805933298 podStartE2EDuration="4.518696742s" podCreationTimestamp="2025-10-11 03:59:12 +0000 UTC" firstStartedPulling="2025-10-11 03:59:13.448258577 +0000 UTC m=+248.784548263" lastFinishedPulling="2025-10-11 
03:59:16.161022021 +0000 UTC m=+251.497311707" observedRunningTime="2025-10-11 03:59:16.514084465 +0000 UTC m=+251.850374151" watchObservedRunningTime="2025-10-11 03:59:16.518696742 +0000 UTC m=+251.854986448" Oct 11 03:59:17 crc kubenswrapper[4798]: I1011 03:59:17.492584 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4bsh" event={"ID":"5315221c-e6ab-4c3f-9d74-5bf456a5b78e","Type":"ContainerStarted","Data":"c20ffba49a614ce95f764ceffde328a8a00aba50023af9704f71dc03bd6e11e2"} Oct 11 03:59:17 crc kubenswrapper[4798]: I1011 03:59:17.497362 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pbct9" event={"ID":"9c0c7752-53e3-489c-b668-370e654f482a","Type":"ContainerStarted","Data":"8f9ba80fbc907b689e628090ca1db69a5ef75f5a35b72cb7a1c647cf7e33a688"} Oct 11 03:59:17 crc kubenswrapper[4798]: I1011 03:59:17.520187 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-h4bsh" podStartSLOduration=3.045396627 podStartE2EDuration="5.520161505s" podCreationTimestamp="2025-10-11 03:59:12 +0000 UTC" firstStartedPulling="2025-10-11 03:59:14.454089259 +0000 UTC m=+249.790378945" lastFinishedPulling="2025-10-11 03:59:16.928854137 +0000 UTC m=+252.265143823" observedRunningTime="2025-10-11 03:59:17.517856217 +0000 UTC m=+252.854145943" watchObservedRunningTime="2025-10-11 03:59:17.520161505 +0000 UTC m=+252.856451191" Oct 11 03:59:18 crc kubenswrapper[4798]: I1011 03:59:18.505861 4798 generic.go:334] "Generic (PLEG): container finished" podID="9c0c7752-53e3-489c-b668-370e654f482a" containerID="8f9ba80fbc907b689e628090ca1db69a5ef75f5a35b72cb7a1c647cf7e33a688" exitCode=0 Oct 11 03:59:18 crc kubenswrapper[4798]: I1011 03:59:18.505980 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pbct9" event={"ID":"9c0c7752-53e3-489c-b668-370e654f482a","Type":"ContainerDied","Data":"8f9ba80fbc907b689e628090ca1db69a5ef75f5a35b72cb7a1c647cf7e33a688"} Oct 11 03:59:18 crc kubenswrapper[4798]: I1011 03:59:18.508224 4798 generic.go:334] "Generic (PLEG): container finished" podID="dfc6a2a4-b981-4d28-be73-8d6689b028cd" containerID="6f1cd8a49dfdeeb5f6ff078277d51e83482f9babdac8bb1d6b4db9a393291fb3" exitCode=0 Oct 11 03:59:18 crc kubenswrapper[4798]: I1011 03:59:18.508314 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drk2l" event={"ID":"dfc6a2a4-b981-4d28-be73-8d6689b028cd","Type":"ContainerDied","Data":"6f1cd8a49dfdeeb5f6ff078277d51e83482f9babdac8bb1d6b4db9a393291fb3"} Oct 11 03:59:19 crc kubenswrapper[4798]: I1011 03:59:19.516080 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drk2l" event={"ID":"dfc6a2a4-b981-4d28-be73-8d6689b028cd","Type":"ContainerStarted","Data":"c17c81455238e976e53219c7d4c1f90bc91e34094769c0a0c0bdef8e4e41a026"} Oct 11 03:59:19 crc kubenswrapper[4798]: I1011 03:59:19.521852 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pbct9" event={"ID":"9c0c7752-53e3-489c-b668-370e654f482a","Type":"ContainerStarted","Data":"230e9af2195978b8f95b5bf3e6751f1eaca0b9bda67afa30145933be14c5c9ae"} Oct 11 03:59:19 crc kubenswrapper[4798]: I1011 03:59:19.540209 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-drk2l" podStartSLOduration=1.983568588 podStartE2EDuration="4.540077943s" 
podCreationTimestamp="2025-10-11 03:59:15 +0000 UTC" firstStartedPulling="2025-10-11 03:59:16.478434196 +0000 UTC m=+251.814723882" lastFinishedPulling="2025-10-11 03:59:19.034943511 +0000 UTC m=+254.371233237" observedRunningTime="2025-10-11 03:59:19.537516437 +0000 UTC m=+254.873806123" watchObservedRunningTime="2025-10-11 03:59:19.540077943 +0000 UTC m=+254.876367629" Oct 11 03:59:19 crc kubenswrapper[4798]: I1011 03:59:19.575313 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pbct9" podStartSLOduration=2.888425496 podStartE2EDuration="5.575288759s" podCreationTimestamp="2025-10-11 03:59:14 +0000 UTC" firstStartedPulling="2025-10-11 03:59:16.47315058 +0000 UTC m=+251.809440266" lastFinishedPulling="2025-10-11 03:59:19.160013843 +0000 UTC m=+254.496303529" observedRunningTime="2025-10-11 03:59:19.571302012 +0000 UTC m=+254.907591698" watchObservedRunningTime="2025-10-11 03:59:19.575288759 +0000 UTC m=+254.911578445" Oct 11 03:59:22 crc kubenswrapper[4798]: I1011 03:59:22.978187 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:22 crc kubenswrapper[4798]: I1011 03:59:22.981835 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:23 crc kubenswrapper[4798]: I1011 03:59:23.043776 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:23 crc kubenswrapper[4798]: I1011 03:59:23.117078 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:23 crc kubenswrapper[4798]: I1011 03:59:23.117138 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:23 crc kubenswrapper[4798]: I1011 03:59:23.164939 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:23 crc kubenswrapper[4798]: I1011 03:59:23.594300 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-v8lpr" Oct 11 03:59:23 crc kubenswrapper[4798]: I1011 03:59:23.598121 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-h4bsh" Oct 11 03:59:25 crc kubenswrapper[4798]: I1011 03:59:25.204258 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-65656" Oct 11 03:59:25 crc kubenswrapper[4798]: I1011 03:59:25.290297 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bdw6s"] Oct 11 03:59:25 crc kubenswrapper[4798]: I1011 03:59:25.328640 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:25 crc kubenswrapper[4798]: I1011 03:59:25.328708 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:25 crc kubenswrapper[4798]: I1011 03:59:25.372369 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:25 crc kubenswrapper[4798]: I1011 03:59:25.547814 4798 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:25 crc kubenswrapper[4798]: I1011 03:59:25.547943 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:25 crc kubenswrapper[4798]: I1011 03:59:25.608178 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:25 crc kubenswrapper[4798]: I1011 03:59:25.612434 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pbct9" Oct 11 03:59:26 crc kubenswrapper[4798]: I1011 03:59:26.609131 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-drk2l" Oct 11 03:59:36 crc kubenswrapper[4798]: I1011 03:59:36.781150 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-kw978" podUID="f9bbcb27-587d-4994-8ef7-25b9ac6bf912" containerName="oauth-openshift" containerID="cri-o://34e4dfcfab7e1a16222438fd643cdf2bdab51a95eac1e5a2d75d5c4e5139b403" gracePeriod=15 Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.215310 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.250682 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-855d4664c5-jlbmz"] Oct 11 03:59:37 crc kubenswrapper[4798]: E1011 03:59:37.251318 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9bbcb27-587d-4994-8ef7-25b9ac6bf912" containerName="oauth-openshift" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.251339 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9bbcb27-587d-4994-8ef7-25b9ac6bf912" containerName="oauth-openshift" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.251458 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9bbcb27-587d-4994-8ef7-25b9ac6bf912" containerName="oauth-openshift" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.252795 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.269203 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-855d4664c5-jlbmz"] Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.342609 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-session\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.342683 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-ocp-branding-template\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.342741 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-serving-cert\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.342770 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-idp-0-file-data\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.342800 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-error\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343367 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hs4f\" (UniqueName: \"kubernetes.io/projected/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-kube-api-access-6hs4f\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343440 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-audit-policies\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343467 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-provider-selection\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343497 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-audit-dir\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343521 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-router-certs\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343553 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-login\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343578 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-trusted-ca-bundle\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343600 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-service-ca\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343623 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-cliconfig\") pod \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\" (UID: \"f9bbcb27-587d-4994-8ef7-25b9ac6bf912\") " Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343733 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-audit-dir\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343767 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343795 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j292d\" (UniqueName: \"kubernetes.io/projected/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-kube-api-access-j292d\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343829 4798 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343854 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-serving-cert\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343875 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-user-template-login\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343894 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-user-template-error\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343918 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.343966 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-session\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.345130 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-cliconfig\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.345185 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-service-ca\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " 
pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.345233 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-router-certs\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.345262 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.344156 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.345308 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.345280 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.345311 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-audit-policies\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.345663 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.345681 4798 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.345737 4798 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-audit-dir\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.345752 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.345889 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.352809 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.353843 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.355353 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.355507 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.355542 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.355832 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.355953 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.357212 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-kube-api-access-6hs4f" (OuterVolumeSpecName: "kube-api-access-6hs4f") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "kube-api-access-6hs4f". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.358104 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "f9bbcb27-587d-4994-8ef7-25b9ac6bf912" (UID: "f9bbcb27-587d-4994-8ef7-25b9ac6bf912"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.447761 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-audit-policies\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.447827 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-audit-dir\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.447856 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.447897 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j292d\" (UniqueName: \"kubernetes.io/projected/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-kube-api-access-j292d\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.447953 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.447990 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-serving-cert\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448006 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-audit-dir\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448020 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-user-template-login\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448531 4798 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-user-template-error\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448568 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448619 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-session\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448647 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-cliconfig\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448685 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-service-ca\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448731 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-router-certs\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448765 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448843 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448863 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448880 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448898 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448913 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448929 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448946 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448963 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448978 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.448998 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hs4f\" (UniqueName: \"kubernetes.io/projected/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-kube-api-access-6hs4f\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.449013 4798 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f9bbcb27-587d-4994-8ef7-25b9ac6bf912-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.450909 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-service-ca\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.450973 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-cliconfig\") pod 
\"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.452092 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.452753 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-audit-policies\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.453923 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.453936 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.454065 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-user-template-login\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.454642 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.457302 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-session\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.458311 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-serving-cert\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: 
\"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.458547 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-user-template-error\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.459518 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-v4-0-config-system-router-certs\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.474138 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j292d\" (UniqueName: \"kubernetes.io/projected/a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb-kube-api-access-j292d\") pod \"oauth-openshift-855d4664c5-jlbmz\" (UID: \"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb\") " pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.577292 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.639203 4798 generic.go:334] "Generic (PLEG): container finished" podID="f9bbcb27-587d-4994-8ef7-25b9ac6bf912" containerID="34e4dfcfab7e1a16222438fd643cdf2bdab51a95eac1e5a2d75d5c4e5139b403" exitCode=0 Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.639259 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-kw978" event={"ID":"f9bbcb27-587d-4994-8ef7-25b9ac6bf912","Type":"ContainerDied","Data":"34e4dfcfab7e1a16222438fd643cdf2bdab51a95eac1e5a2d75d5c4e5139b403"} Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.639294 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-kw978" event={"ID":"f9bbcb27-587d-4994-8ef7-25b9ac6bf912","Type":"ContainerDied","Data":"37dc8b9107c92444751e9f9b63d4b2a35cbe29d756d1646ee83c8f63f3bd1580"} Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.639314 4798 scope.go:117] "RemoveContainer" containerID="34e4dfcfab7e1a16222438fd643cdf2bdab51a95eac1e5a2d75d5c4e5139b403" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.639364 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-kw978" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.671074 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-kw978"] Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.673244 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-kw978"] Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.688337 4798 scope.go:117] "RemoveContainer" containerID="34e4dfcfab7e1a16222438fd643cdf2bdab51a95eac1e5a2d75d5c4e5139b403" Oct 11 03:59:37 crc kubenswrapper[4798]: E1011 03:59:37.696025 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34e4dfcfab7e1a16222438fd643cdf2bdab51a95eac1e5a2d75d5c4e5139b403\": container with ID starting with 34e4dfcfab7e1a16222438fd643cdf2bdab51a95eac1e5a2d75d5c4e5139b403 not found: ID does not exist" containerID="34e4dfcfab7e1a16222438fd643cdf2bdab51a95eac1e5a2d75d5c4e5139b403" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.696291 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34e4dfcfab7e1a16222438fd643cdf2bdab51a95eac1e5a2d75d5c4e5139b403"} err="failed to get container status \"34e4dfcfab7e1a16222438fd643cdf2bdab51a95eac1e5a2d75d5c4e5139b403\": rpc error: code = NotFound desc = could not find container \"34e4dfcfab7e1a16222438fd643cdf2bdab51a95eac1e5a2d75d5c4e5139b403\": container with ID starting with 34e4dfcfab7e1a16222438fd643cdf2bdab51a95eac1e5a2d75d5c4e5139b403 not found: ID does not exist" Oct 11 03:59:37 crc kubenswrapper[4798]: I1011 03:59:37.888582 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-855d4664c5-jlbmz"] Oct 11 03:59:38 crc kubenswrapper[4798]: I1011 03:59:38.650666 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" event={"ID":"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb","Type":"ContainerStarted","Data":"6f8431de0d3746a1084fc968c18e2e808b9e16c878a08b489a156b4570ad297f"} Oct 11 03:59:38 crc kubenswrapper[4798]: I1011 03:59:38.651614 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" event={"ID":"a3ea07cb-0b19-4dd6-96aa-d0b74c3fa2cb","Type":"ContainerStarted","Data":"f8eab09112e24a8c52249c6c073324d94a837ffc12f7dd31271e2f7482ebcf2f"} Oct 11 03:59:38 crc kubenswrapper[4798]: I1011 03:59:38.653073 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:38 crc kubenswrapper[4798]: I1011 03:59:38.685076 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" podStartSLOduration=27.685043243 podStartE2EDuration="27.685043243s" podCreationTimestamp="2025-10-11 03:59:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 03:59:38.679375706 +0000 UTC m=+274.015665402" watchObservedRunningTime="2025-10-11 03:59:38.685043243 +0000 UTC m=+274.021332969" Oct 11 03:59:38 crc kubenswrapper[4798]: I1011 03:59:38.754233 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-855d4664c5-jlbmz" Oct 11 03:59:39 crc 
kubenswrapper[4798]: I1011 03:59:39.437887 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9bbcb27-587d-4994-8ef7-25b9ac6bf912" path="/var/lib/kubelet/pods/f9bbcb27-587d-4994-8ef7-25b9ac6bf912/volumes" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.348042 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" podUID="3de03a8d-34d7-416d-8f1b-330ad429a3b1" containerName="registry" containerID="cri-o://154bffdf90ea7d36f6c03e255c3a7da7395a5d57d71ae8bf7a971d53a53b316a" gracePeriod=30 Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.729765 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.764433 4798 generic.go:334] "Generic (PLEG): container finished" podID="3de03a8d-34d7-416d-8f1b-330ad429a3b1" containerID="154bffdf90ea7d36f6c03e255c3a7da7395a5d57d71ae8bf7a971d53a53b316a" exitCode=0 Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.764496 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" event={"ID":"3de03a8d-34d7-416d-8f1b-330ad429a3b1","Type":"ContainerDied","Data":"154bffdf90ea7d36f6c03e255c3a7da7395a5d57d71ae8bf7a971d53a53b316a"} Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.764531 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" event={"ID":"3de03a8d-34d7-416d-8f1b-330ad429a3b1","Type":"ContainerDied","Data":"355f05c9487ae5e6f1655c94f81d446993a80c8dba5ac59e1d8437d356f79db4"} Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.764551 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-bdw6s" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.764565 4798 scope.go:117] "RemoveContainer" containerID="154bffdf90ea7d36f6c03e255c3a7da7395a5d57d71ae8bf7a971d53a53b316a" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.787180 4798 scope.go:117] "RemoveContainer" containerID="154bffdf90ea7d36f6c03e255c3a7da7395a5d57d71ae8bf7a971d53a53b316a" Oct 11 03:59:50 crc kubenswrapper[4798]: E1011 03:59:50.787673 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"154bffdf90ea7d36f6c03e255c3a7da7395a5d57d71ae8bf7a971d53a53b316a\": container with ID starting with 154bffdf90ea7d36f6c03e255c3a7da7395a5d57d71ae8bf7a971d53a53b316a not found: ID does not exist" containerID="154bffdf90ea7d36f6c03e255c3a7da7395a5d57d71ae8bf7a971d53a53b316a" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.787716 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"154bffdf90ea7d36f6c03e255c3a7da7395a5d57d71ae8bf7a971d53a53b316a"} err="failed to get container status \"154bffdf90ea7d36f6c03e255c3a7da7395a5d57d71ae8bf7a971d53a53b316a\": rpc error: code = NotFound desc = could not find container \"154bffdf90ea7d36f6c03e255c3a7da7395a5d57d71ae8bf7a971d53a53b316a\": container with ID starting with 154bffdf90ea7d36f6c03e255c3a7da7395a5d57d71ae8bf7a971d53a53b316a not found: ID does not exist" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.833966 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.834500 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkpg9\" (UniqueName: \"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-kube-api-access-qkpg9\") pod \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.834603 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3de03a8d-34d7-416d-8f1b-330ad429a3b1-installation-pull-secrets\") pod \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.834645 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-registry-tls\") pod \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.834713 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3de03a8d-34d7-416d-8f1b-330ad429a3b1-trusted-ca\") pod \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.834847 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-bound-sa-token\") pod \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.834960 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3de03a8d-34d7-416d-8f1b-330ad429a3b1-registry-certificates\") pod \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.835018 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3de03a8d-34d7-416d-8f1b-330ad429a3b1-ca-trust-extracted\") pod \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\" (UID: \"3de03a8d-34d7-416d-8f1b-330ad429a3b1\") " Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.836848 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3de03a8d-34d7-416d-8f1b-330ad429a3b1-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "3de03a8d-34d7-416d-8f1b-330ad429a3b1" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.836888 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3de03a8d-34d7-416d-8f1b-330ad429a3b1-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "3de03a8d-34d7-416d-8f1b-330ad429a3b1" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.842945 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "3de03a8d-34d7-416d-8f1b-330ad429a3b1" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.845218 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-kube-api-access-qkpg9" (OuterVolumeSpecName: "kube-api-access-qkpg9") pod "3de03a8d-34d7-416d-8f1b-330ad429a3b1" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1"). InnerVolumeSpecName "kube-api-access-qkpg9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.845723 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "3de03a8d-34d7-416d-8f1b-330ad429a3b1" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.845863 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3de03a8d-34d7-416d-8f1b-330ad429a3b1-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "3de03a8d-34d7-416d-8f1b-330ad429a3b1" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1"). 
InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.856009 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3de03a8d-34d7-416d-8f1b-330ad429a3b1-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "3de03a8d-34d7-416d-8f1b-330ad429a3b1" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.856902 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "3de03a8d-34d7-416d-8f1b-330ad429a3b1" (UID: "3de03a8d-34d7-416d-8f1b-330ad429a3b1"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.937780 4798 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3de03a8d-34d7-416d-8f1b-330ad429a3b1-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.937828 4798 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-registry-tls\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.937839 4798 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3de03a8d-34d7-416d-8f1b-330ad429a3b1-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.937848 4798 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.937859 4798 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3de03a8d-34d7-416d-8f1b-330ad429a3b1-registry-certificates\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.937868 4798 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3de03a8d-34d7-416d-8f1b-330ad429a3b1-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:50 crc kubenswrapper[4798]: I1011 03:59:50.937876 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkpg9\" (UniqueName: \"kubernetes.io/projected/3de03a8d-34d7-416d-8f1b-330ad429a3b1-kube-api-access-qkpg9\") on node \"crc\" DevicePath \"\"" Oct 11 03:59:51 crc kubenswrapper[4798]: I1011 03:59:51.112106 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bdw6s"] Oct 11 03:59:51 crc kubenswrapper[4798]: I1011 03:59:51.120891 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bdw6s"] Oct 11 03:59:51 crc kubenswrapper[4798]: I1011 03:59:51.432512 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3de03a8d-34d7-416d-8f1b-330ad429a3b1" 
path="/var/lib/kubelet/pods/3de03a8d-34d7-416d-8f1b-330ad429a3b1/volumes" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.146316 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn"] Oct 11 04:00:00 crc kubenswrapper[4798]: E1011 04:00:00.147253 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3de03a8d-34d7-416d-8f1b-330ad429a3b1" containerName="registry" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.147274 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3de03a8d-34d7-416d-8f1b-330ad429a3b1" containerName="registry" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.147495 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3de03a8d-34d7-416d-8f1b-330ad429a3b1" containerName="registry" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.148055 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.150581 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.152304 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.159232 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn"] Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.280457 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d53ca2e-470c-4b35-860b-0977c44cd826-config-volume\") pod \"collect-profiles-29335920-q6ndn\" (UID: \"8d53ca2e-470c-4b35-860b-0977c44cd826\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.280526 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d53ca2e-470c-4b35-860b-0977c44cd826-secret-volume\") pod \"collect-profiles-29335920-q6ndn\" (UID: \"8d53ca2e-470c-4b35-860b-0977c44cd826\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.280982 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7d6bz\" (UniqueName: \"kubernetes.io/projected/8d53ca2e-470c-4b35-860b-0977c44cd826-kube-api-access-7d6bz\") pod \"collect-profiles-29335920-q6ndn\" (UID: \"8d53ca2e-470c-4b35-860b-0977c44cd826\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.382209 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d53ca2e-470c-4b35-860b-0977c44cd826-config-volume\") pod \"collect-profiles-29335920-q6ndn\" (UID: \"8d53ca2e-470c-4b35-860b-0977c44cd826\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.382286 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d53ca2e-470c-4b35-860b-0977c44cd826-secret-volume\") pod \"collect-profiles-29335920-q6ndn\" (UID: \"8d53ca2e-470c-4b35-860b-0977c44cd826\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.382319 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7d6bz\" (UniqueName: \"kubernetes.io/projected/8d53ca2e-470c-4b35-860b-0977c44cd826-kube-api-access-7d6bz\") pod \"collect-profiles-29335920-q6ndn\" (UID: \"8d53ca2e-470c-4b35-860b-0977c44cd826\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.383146 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d53ca2e-470c-4b35-860b-0977c44cd826-config-volume\") pod \"collect-profiles-29335920-q6ndn\" (UID: \"8d53ca2e-470c-4b35-860b-0977c44cd826\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.394636 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d53ca2e-470c-4b35-860b-0977c44cd826-secret-volume\") pod \"collect-profiles-29335920-q6ndn\" (UID: \"8d53ca2e-470c-4b35-860b-0977c44cd826\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.399358 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7d6bz\" (UniqueName: \"kubernetes.io/projected/8d53ca2e-470c-4b35-860b-0977c44cd826-kube-api-access-7d6bz\") pod \"collect-profiles-29335920-q6ndn\" (UID: \"8d53ca2e-470c-4b35-860b-0977c44cd826\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.472815 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.684900 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn"] Oct 11 04:00:00 crc kubenswrapper[4798]: W1011 04:00:00.692458 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8d53ca2e_470c_4b35_860b_0977c44cd826.slice/crio-98139feac5a5eeebdb9ca241e31feaa1c54e5c584d5126cb64b5289578ebca2b WatchSource:0}: Error finding container 98139feac5a5eeebdb9ca241e31feaa1c54e5c584d5126cb64b5289578ebca2b: Status 404 returned error can't find the container with id 98139feac5a5eeebdb9ca241e31feaa1c54e5c584d5126cb64b5289578ebca2b Oct 11 04:00:00 crc kubenswrapper[4798]: I1011 04:00:00.836187 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" event={"ID":"8d53ca2e-470c-4b35-860b-0977c44cd826","Type":"ContainerStarted","Data":"98139feac5a5eeebdb9ca241e31feaa1c54e5c584d5126cb64b5289578ebca2b"} Oct 11 04:00:01 crc kubenswrapper[4798]: I1011 04:00:01.846966 4798 generic.go:334] "Generic (PLEG): container finished" podID="8d53ca2e-470c-4b35-860b-0977c44cd826" containerID="f73f3116bbbab7aeeb118d43293a53d67324db76c4e47d2b8dd2a153410da819" exitCode=0 Oct 11 04:00:01 crc kubenswrapper[4798]: I1011 04:00:01.847029 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" event={"ID":"8d53ca2e-470c-4b35-860b-0977c44cd826","Type":"ContainerDied","Data":"f73f3116bbbab7aeeb118d43293a53d67324db76c4e47d2b8dd2a153410da819"} Oct 11 04:00:03 crc kubenswrapper[4798]: I1011 04:00:03.096008 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" Oct 11 04:00:03 crc kubenswrapper[4798]: I1011 04:00:03.120504 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d53ca2e-470c-4b35-860b-0977c44cd826-secret-volume\") pod \"8d53ca2e-470c-4b35-860b-0977c44cd826\" (UID: \"8d53ca2e-470c-4b35-860b-0977c44cd826\") " Oct 11 04:00:03 crc kubenswrapper[4798]: I1011 04:00:03.120555 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d53ca2e-470c-4b35-860b-0977c44cd826-config-volume\") pod \"8d53ca2e-470c-4b35-860b-0977c44cd826\" (UID: \"8d53ca2e-470c-4b35-860b-0977c44cd826\") " Oct 11 04:00:03 crc kubenswrapper[4798]: I1011 04:00:03.120606 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7d6bz\" (UniqueName: \"kubernetes.io/projected/8d53ca2e-470c-4b35-860b-0977c44cd826-kube-api-access-7d6bz\") pod \"8d53ca2e-470c-4b35-860b-0977c44cd826\" (UID: \"8d53ca2e-470c-4b35-860b-0977c44cd826\") " Oct 11 04:00:03 crc kubenswrapper[4798]: I1011 04:00:03.121374 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d53ca2e-470c-4b35-860b-0977c44cd826-config-volume" (OuterVolumeSpecName: "config-volume") pod "8d53ca2e-470c-4b35-860b-0977c44cd826" (UID: "8d53ca2e-470c-4b35-860b-0977c44cd826"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:00:03 crc kubenswrapper[4798]: I1011 04:00:03.128164 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d53ca2e-470c-4b35-860b-0977c44cd826-kube-api-access-7d6bz" (OuterVolumeSpecName: "kube-api-access-7d6bz") pod "8d53ca2e-470c-4b35-860b-0977c44cd826" (UID: "8d53ca2e-470c-4b35-860b-0977c44cd826"). InnerVolumeSpecName "kube-api-access-7d6bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:00:03 crc kubenswrapper[4798]: I1011 04:00:03.128634 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d53ca2e-470c-4b35-860b-0977c44cd826-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8d53ca2e-470c-4b35-860b-0977c44cd826" (UID: "8d53ca2e-470c-4b35-860b-0977c44cd826"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:00:03 crc kubenswrapper[4798]: I1011 04:00:03.221585 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7d6bz\" (UniqueName: \"kubernetes.io/projected/8d53ca2e-470c-4b35-860b-0977c44cd826-kube-api-access-7d6bz\") on node \"crc\" DevicePath \"\"" Oct 11 04:00:03 crc kubenswrapper[4798]: I1011 04:00:03.221617 4798 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d53ca2e-470c-4b35-860b-0977c44cd826-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 04:00:03 crc kubenswrapper[4798]: I1011 04:00:03.221627 4798 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d53ca2e-470c-4b35-860b-0977c44cd826-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 04:00:03 crc kubenswrapper[4798]: I1011 04:00:03.862447 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" event={"ID":"8d53ca2e-470c-4b35-860b-0977c44cd826","Type":"ContainerDied","Data":"98139feac5a5eeebdb9ca241e31feaa1c54e5c584d5126cb64b5289578ebca2b"} Oct 11 04:00:03 crc kubenswrapper[4798]: I1011 04:00:03.862496 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98139feac5a5eeebdb9ca241e31feaa1c54e5c584d5126cb64b5289578ebca2b" Oct 11 04:00:03 crc kubenswrapper[4798]: I1011 04:00:03.862554 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn" Oct 11 04:00:27 crc kubenswrapper[4798]: I1011 04:00:27.138973 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:00:27 crc kubenswrapper[4798]: I1011 04:00:27.140221 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:00:57 crc kubenswrapper[4798]: I1011 04:00:57.139201 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:00:57 crc kubenswrapper[4798]: I1011 04:00:57.140365 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:01:27 crc kubenswrapper[4798]: I1011 04:01:27.139588 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:01:27 crc kubenswrapper[4798]: I1011 04:01:27.140655 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:01:27 crc kubenswrapper[4798]: I1011 04:01:27.140754 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 04:01:27 crc kubenswrapper[4798]: I1011 04:01:27.141755 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"166d4bff6ae77be3247b1d1d53d8215160cadf3101be02557d941e30fb2763c3"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 04:01:27 crc kubenswrapper[4798]: I1011 04:01:27.141867 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://166d4bff6ae77be3247b1d1d53d8215160cadf3101be02557d941e30fb2763c3" gracePeriod=600 Oct 11 04:01:27 crc kubenswrapper[4798]: I1011 04:01:27.503136 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" 
containerID="166d4bff6ae77be3247b1d1d53d8215160cadf3101be02557d941e30fb2763c3" exitCode=0 Oct 11 04:01:27 crc kubenswrapper[4798]: I1011 04:01:27.503602 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"166d4bff6ae77be3247b1d1d53d8215160cadf3101be02557d941e30fb2763c3"} Oct 11 04:01:27 crc kubenswrapper[4798]: I1011 04:01:27.503735 4798 scope.go:117] "RemoveContainer" containerID="d4862faa12fa49bd57098b8b4e9e9dd287da8c8808a9ca32bfdd3a9b4313945d" Oct 11 04:01:28 crc kubenswrapper[4798]: I1011 04:01:28.516024 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"9b94e5c3b0d64dfba361ffe34f00ea99c8f4d7e6983b87de0fde65e4be638ab1"} Oct 11 04:03:27 crc kubenswrapper[4798]: I1011 04:03:27.139240 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:03:27 crc kubenswrapper[4798]: I1011 04:03:27.140531 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:03:57 crc kubenswrapper[4798]: I1011 04:03:57.139100 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:03:57 crc kubenswrapper[4798]: I1011 04:03:57.139781 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.451126 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fgz9c"] Oct 11 04:04:20 crc kubenswrapper[4798]: E1011 04:04:20.451909 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d53ca2e-470c-4b35-860b-0977c44cd826" containerName="collect-profiles" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.451923 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d53ca2e-470c-4b35-860b-0977c44cd826" containerName="collect-profiles" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.452030 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d53ca2e-470c-4b35-860b-0977c44cd826" containerName="collect-profiles" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.452490 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-fgz9c" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.461051 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.461152 4798 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-r6xm2" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.463585 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.476163 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-kkd7l"] Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.477026 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-kkd7l" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.480405 4798 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-nkjkj" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.480724 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-b7hmq"] Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.481587 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-b7hmq" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.485324 4798 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-zc5hm" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.498880 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-kkd7l"] Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.510296 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-b7hmq"] Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.520898 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fgz9c"] Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.572151 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-265pp\" (UniqueName: \"kubernetes.io/projected/5e28bfca-a266-4225-8f9f-4d7e71691047-kube-api-access-265pp\") pod \"cert-manager-cainjector-7f985d654d-fgz9c\" (UID: \"5e28bfca-a266-4225-8f9f-4d7e71691047\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fgz9c" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.673841 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glh8p\" (UniqueName: \"kubernetes.io/projected/0564940f-8ec6-4a8f-8f1e-cda48ac953c2-kube-api-access-glh8p\") pod \"cert-manager-webhook-5655c58dd6-kkd7l\" (UID: \"0564940f-8ec6-4a8f-8f1e-cda48ac953c2\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-kkd7l" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.673929 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2nwq\" (UniqueName: \"kubernetes.io/projected/25dd9371-dab0-4e09-b2a3-da8a9dfe7135-kube-api-access-s2nwq\") pod \"cert-manager-5b446d88c5-b7hmq\" (UID: \"25dd9371-dab0-4e09-b2a3-da8a9dfe7135\") " pod="cert-manager/cert-manager-5b446d88c5-b7hmq" Oct 11 04:04:20 
crc kubenswrapper[4798]: I1011 04:04:20.674009 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-265pp\" (UniqueName: \"kubernetes.io/projected/5e28bfca-a266-4225-8f9f-4d7e71691047-kube-api-access-265pp\") pod \"cert-manager-cainjector-7f985d654d-fgz9c\" (UID: \"5e28bfca-a266-4225-8f9f-4d7e71691047\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fgz9c" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.694730 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-265pp\" (UniqueName: \"kubernetes.io/projected/5e28bfca-a266-4225-8f9f-4d7e71691047-kube-api-access-265pp\") pod \"cert-manager-cainjector-7f985d654d-fgz9c\" (UID: \"5e28bfca-a266-4225-8f9f-4d7e71691047\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fgz9c" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.768014 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-fgz9c" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.775729 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glh8p\" (UniqueName: \"kubernetes.io/projected/0564940f-8ec6-4a8f-8f1e-cda48ac953c2-kube-api-access-glh8p\") pod \"cert-manager-webhook-5655c58dd6-kkd7l\" (UID: \"0564940f-8ec6-4a8f-8f1e-cda48ac953c2\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-kkd7l" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.776105 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2nwq\" (UniqueName: \"kubernetes.io/projected/25dd9371-dab0-4e09-b2a3-da8a9dfe7135-kube-api-access-s2nwq\") pod \"cert-manager-5b446d88c5-b7hmq\" (UID: \"25dd9371-dab0-4e09-b2a3-da8a9dfe7135\") " pod="cert-manager/cert-manager-5b446d88c5-b7hmq" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.801330 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2nwq\" (UniqueName: \"kubernetes.io/projected/25dd9371-dab0-4e09-b2a3-da8a9dfe7135-kube-api-access-s2nwq\") pod \"cert-manager-5b446d88c5-b7hmq\" (UID: \"25dd9371-dab0-4e09-b2a3-da8a9dfe7135\") " pod="cert-manager/cert-manager-5b446d88c5-b7hmq" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.801590 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glh8p\" (UniqueName: \"kubernetes.io/projected/0564940f-8ec6-4a8f-8f1e-cda48ac953c2-kube-api-access-glh8p\") pod \"cert-manager-webhook-5655c58dd6-kkd7l\" (UID: \"0564940f-8ec6-4a8f-8f1e-cda48ac953c2\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-kkd7l" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.801686 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-b7hmq" Oct 11 04:04:20 crc kubenswrapper[4798]: I1011 04:04:20.986994 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fgz9c"] Oct 11 04:04:21 crc kubenswrapper[4798]: I1011 04:04:21.002545 4798 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 04:04:21 crc kubenswrapper[4798]: I1011 04:04:21.057972 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-b7hmq"] Oct 11 04:04:21 crc kubenswrapper[4798]: W1011 04:04:21.066238 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25dd9371_dab0_4e09_b2a3_da8a9dfe7135.slice/crio-6ca6164216eade174b94d25af419c7c82e433596d5839f49bbe9e2abcb3e8b58 WatchSource:0}: Error finding container 6ca6164216eade174b94d25af419c7c82e433596d5839f49bbe9e2abcb3e8b58: Status 404 returned error can't find the container with id 6ca6164216eade174b94d25af419c7c82e433596d5839f49bbe9e2abcb3e8b58 Oct 11 04:04:21 crc kubenswrapper[4798]: I1011 04:04:21.091068 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-kkd7l" Oct 11 04:04:21 crc kubenswrapper[4798]: I1011 04:04:21.264252 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-kkd7l"] Oct 11 04:04:21 crc kubenswrapper[4798]: I1011 04:04:21.713277 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-kkd7l" event={"ID":"0564940f-8ec6-4a8f-8f1e-cda48ac953c2","Type":"ContainerStarted","Data":"44ae06924042cfa19924189bbe529ed4a852c10dc40cd78466f713a17dd7d029"} Oct 11 04:04:21 crc kubenswrapper[4798]: I1011 04:04:21.715836 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fgz9c" event={"ID":"5e28bfca-a266-4225-8f9f-4d7e71691047","Type":"ContainerStarted","Data":"493411e0c6a96909dac4001626d28400c51006eb2afcaae17526d24f35097de9"} Oct 11 04:04:21 crc kubenswrapper[4798]: I1011 04:04:21.716936 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-b7hmq" event={"ID":"25dd9371-dab0-4e09-b2a3-da8a9dfe7135","Type":"ContainerStarted","Data":"6ca6164216eade174b94d25af419c7c82e433596d5839f49bbe9e2abcb3e8b58"} Oct 11 04:04:24 crc kubenswrapper[4798]: I1011 04:04:24.735288 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-b7hmq" event={"ID":"25dd9371-dab0-4e09-b2a3-da8a9dfe7135","Type":"ContainerStarted","Data":"a1208e3a76be4f8605ee8c12dc88885861d18306cc39b71abd1395127720d7e8"} Oct 11 04:04:24 crc kubenswrapper[4798]: I1011 04:04:24.737908 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-kkd7l" event={"ID":"0564940f-8ec6-4a8f-8f1e-cda48ac953c2","Type":"ContainerStarted","Data":"f9da488bfbdcf2f86142e038efc72359c7c53ed129fd2030e0e1c1a3062954b0"} Oct 11 04:04:24 crc kubenswrapper[4798]: I1011 04:04:24.738066 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-kkd7l" Oct 11 04:04:24 crc kubenswrapper[4798]: I1011 04:04:24.739336 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fgz9c" 
event={"ID":"5e28bfca-a266-4225-8f9f-4d7e71691047","Type":"ContainerStarted","Data":"1320325d4aa41a37d54c4651a4062e16dd215cf5a36c428ef20b8c5ec396aebe"} Oct 11 04:04:24 crc kubenswrapper[4798]: I1011 04:04:24.758510 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-b7hmq" podStartSLOduration=1.740184843 podStartE2EDuration="4.758486305s" podCreationTimestamp="2025-10-11 04:04:20 +0000 UTC" firstStartedPulling="2025-10-11 04:04:21.068166186 +0000 UTC m=+556.404455882" lastFinishedPulling="2025-10-11 04:04:24.086467648 +0000 UTC m=+559.422757344" observedRunningTime="2025-10-11 04:04:24.752586403 +0000 UTC m=+560.088876089" watchObservedRunningTime="2025-10-11 04:04:24.758486305 +0000 UTC m=+560.094775991" Oct 11 04:04:24 crc kubenswrapper[4798]: I1011 04:04:24.771271 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-fgz9c" podStartSLOduration=2.206926374 podStartE2EDuration="4.771248189s" podCreationTimestamp="2025-10-11 04:04:20 +0000 UTC" firstStartedPulling="2025-10-11 04:04:21.002263924 +0000 UTC m=+556.338553610" lastFinishedPulling="2025-10-11 04:04:23.566585739 +0000 UTC m=+558.902875425" observedRunningTime="2025-10-11 04:04:24.769450715 +0000 UTC m=+560.105740411" watchObservedRunningTime="2025-10-11 04:04:24.771248189 +0000 UTC m=+560.107537885" Oct 11 04:04:24 crc kubenswrapper[4798]: I1011 04:04:24.785415 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-kkd7l" podStartSLOduration=1.859755314 podStartE2EDuration="4.785377666s" podCreationTimestamp="2025-10-11 04:04:20 +0000 UTC" firstStartedPulling="2025-10-11 04:04:21.269923447 +0000 UTC m=+556.606213133" lastFinishedPulling="2025-10-11 04:04:24.195545799 +0000 UTC m=+559.531835485" observedRunningTime="2025-10-11 04:04:24.783207804 +0000 UTC m=+560.119497500" watchObservedRunningTime="2025-10-11 04:04:24.785377666 +0000 UTC m=+560.121667352" Oct 11 04:04:27 crc kubenswrapper[4798]: I1011 04:04:27.139277 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:04:27 crc kubenswrapper[4798]: I1011 04:04:27.139838 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:04:27 crc kubenswrapper[4798]: I1011 04:04:27.139893 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 04:04:27 crc kubenswrapper[4798]: I1011 04:04:27.140552 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9b94e5c3b0d64dfba361ffe34f00ea99c8f4d7e6983b87de0fde65e4be638ab1"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 04:04:27 crc kubenswrapper[4798]: I1011 04:04:27.140608 4798 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://9b94e5c3b0d64dfba361ffe34f00ea99c8f4d7e6983b87de0fde65e4be638ab1" gracePeriod=600 Oct 11 04:04:27 crc kubenswrapper[4798]: I1011 04:04:27.761835 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerID="9b94e5c3b0d64dfba361ffe34f00ea99c8f4d7e6983b87de0fde65e4be638ab1" exitCode=0 Oct 11 04:04:27 crc kubenswrapper[4798]: I1011 04:04:27.761945 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"9b94e5c3b0d64dfba361ffe34f00ea99c8f4d7e6983b87de0fde65e4be638ab1"} Oct 11 04:04:27 crc kubenswrapper[4798]: I1011 04:04:27.762322 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"c3b0e40f46ff6d5e205dd95a80f3265b5b1e7509ae9d7aa46df649af185d464c"} Oct 11 04:04:27 crc kubenswrapper[4798]: I1011 04:04:27.762361 4798 scope.go:117] "RemoveContainer" containerID="166d4bff6ae77be3247b1d1d53d8215160cadf3101be02557d941e30fb2763c3" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.071358 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-svt4z"] Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.072330 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovn-controller" containerID="cri-o://4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136" gracePeriod=30 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.072450 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="nbdb" containerID="cri-o://dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5" gracePeriod=30 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.072493 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="kube-rbac-proxy-node" containerID="cri-o://e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc" gracePeriod=30 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.072611 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="sbdb" containerID="cri-o://732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48" gracePeriod=30 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.072581 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="northd" containerID="cri-o://671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2" gracePeriod=30 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.072645 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" 
podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovn-acl-logging" containerID="cri-o://337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477" gracePeriod=30 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.072486 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d" gracePeriod=30 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.102307 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-kkd7l" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.109688 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" containerID="cri-o://25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8" gracePeriod=30 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.425675 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/3.log" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.430967 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovn-acl-logging/0.log" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.431556 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovn-controller/0.log" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.432083 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489037 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-t5nsh"] Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.489262 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489276 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.489288 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovn-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489294 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovn-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.489305 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489313 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.489321 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="kube-rbac-proxy-node" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489329 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="kube-rbac-proxy-node" Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.489338 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="nbdb" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489345 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="nbdb" Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.489352 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="kube-rbac-proxy-ovn-metrics" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489358 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="kube-rbac-proxy-ovn-metrics" Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.489370 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovn-acl-logging" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489407 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovn-acl-logging" Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.489417 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="sbdb" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489423 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="sbdb" Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.489435 4798 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="kubecfg-setup" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489441 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="kubecfg-setup" Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.489451 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489457 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.489463 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489469 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.489476 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="northd" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489482 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="northd" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489596 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="northd" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489609 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489617 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="nbdb" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489624 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="kube-rbac-proxy-node" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489631 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489639 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovn-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489648 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489655 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="kube-rbac-proxy-ovn-metrics" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489663 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489669 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovn-acl-logging" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489675 
4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="sbdb" Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.489761 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489769 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.489873 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerName="ovnkube-controller" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.511175 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.629759 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-env-overrides\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630217 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-run-ovn-kubernetes\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630244 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovnkube-script-lib\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630270 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtk6h\" (UniqueName: \"kubernetes.io/projected/3c2a342b-5252-4957-9e2c-8f81c304b8af-kube-api-access-jtk6h\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630289 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630307 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-ovn\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630321 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-kubelet\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630332 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630352 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovnkube-config\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630367 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-log-socket\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630406 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-systemd\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630442 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-etc-openvswitch\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630464 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-var-lib-openvswitch\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630452 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630486 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-openvswitch\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630505 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-run-netns\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630532 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-systemd-units\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630575 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovn-node-metrics-cert\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630532 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630602 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-slash\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630625 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-node-log\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630635 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "host-run-netns". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630654 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-cni-netd\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630718 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630761 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630720 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-var-lib-cni-networks-ovn-kubernetes\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630735 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630796 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630807 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-cni-bin\") pod \"3c2a342b-5252-4957-9e2c-8f81c304b8af\" (UID: \"3c2a342b-5252-4957-9e2c-8f81c304b8af\") " Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630796 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-slash" (OuterVolumeSpecName: "host-slash") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630825 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630837 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630860 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-node-log" (OuterVolumeSpecName: "node-log") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.630847 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-log-socket" (OuterVolumeSpecName: "log-socket") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631030 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631104 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-run-ovn\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631144 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631173 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f0d6d221-31f9-42f3-b699-7d07a63e8b30-env-overrides\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631201 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-slash\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631231 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-run-systemd\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631250 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-node-log\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631274 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f0d6d221-31f9-42f3-b699-7d07a63e8b30-ovnkube-config\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631294 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-log-socket\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631314 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-run-netns\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631334 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-var-lib-openvswitch\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631352 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-cni-bin\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631379 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f0d6d221-31f9-42f3-b699-7d07a63e8b30-ovn-node-metrics-cert\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631416 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-systemd-units\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631435 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631443 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-kubelet\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631684 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631709 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-etc-openvswitch\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.631763 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f0d6d221-31f9-42f3-b699-7d07a63e8b30-ovnkube-script-lib\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632029 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj4gz\" (UniqueName: \"kubernetes.io/projected/f0d6d221-31f9-42f3-b699-7d07a63e8b30-kube-api-access-tj4gz\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632111 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-run-ovn-kubernetes\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632152 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-run-openvswitch\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632210 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-cni-netd\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632330 4798 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-slash\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632371 4798 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-node-log\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632385 4798 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-cni-netd\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632417 4798 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632432 4798 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-cni-bin\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632445 4798 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-env-overrides\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632458 4798 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632474 4798 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632489 4798 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632500 4798 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-kubelet\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632512 4798 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovnkube-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632525 4798 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-log-socket\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632537 4798 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632553 4798 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632565 4798 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-host-run-netns\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632576 4798 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.632587 4798 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-systemd-units\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.637593 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c2a342b-5252-4957-9e2c-8f81c304b8af-kube-api-access-jtk6h" (OuterVolumeSpecName: "kube-api-access-jtk6h") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "kube-api-access-jtk6h". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.638470 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.648704 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "3c2a342b-5252-4957-9e2c-8f81c304b8af" (UID: "3c2a342b-5252-4957-9e2c-8f81c304b8af"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733496 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-etc-openvswitch\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733375 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-etc-openvswitch\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733608 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f0d6d221-31f9-42f3-b699-7d07a63e8b30-ovnkube-script-lib\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733645 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj4gz\" (UniqueName: \"kubernetes.io/projected/f0d6d221-31f9-42f3-b699-7d07a63e8b30-kube-api-access-tj4gz\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733669 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-run-ovn-kubernetes\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733691 4798 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-run-openvswitch\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733719 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-cni-netd\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733746 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733776 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-run-ovn\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733805 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f0d6d221-31f9-42f3-b699-7d07a63e8b30-env-overrides\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733832 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-slash\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733843 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-run-openvswitch\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733891 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-run-systemd\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733931 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-slash\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733862 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-run-systemd\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733981 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-run-ovn\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733998 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-run-ovn-kubernetes\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733938 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.733957 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-cni-netd\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734090 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-node-log\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734159 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f0d6d221-31f9-42f3-b699-7d07a63e8b30-ovnkube-config\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734103 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-node-log\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734229 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-log-socket\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734291 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-log-socket\") pod \"ovnkube-node-t5nsh\" (UID: 
\"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734298 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-run-netns\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734351 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-var-lib-openvswitch\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734424 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-run-netns\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734434 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-cni-bin\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734479 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-cni-bin\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734458 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-var-lib-openvswitch\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734515 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f0d6d221-31f9-42f3-b699-7d07a63e8b30-ovn-node-metrics-cert\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734643 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-systemd-units\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734726 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-systemd-units\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 
crc kubenswrapper[4798]: I1011 04:04:31.734726 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-kubelet\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734745 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f0d6d221-31f9-42f3-b699-7d07a63e8b30-env-overrides\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734767 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f0d6d221-31f9-42f3-b699-7d07a63e8b30-host-kubelet\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734901 4798 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3c2a342b-5252-4957-9e2c-8f81c304b8af-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734924 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtk6h\" (UniqueName: \"kubernetes.io/projected/3c2a342b-5252-4957-9e2c-8f81c304b8af-kube-api-access-jtk6h\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.734934 4798 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3c2a342b-5252-4957-9e2c-8f81c304b8af-run-systemd\") on node \"crc\" DevicePath \"\"" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.735175 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f0d6d221-31f9-42f3-b699-7d07a63e8b30-ovnkube-config\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.735723 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f0d6d221-31f9-42f3-b699-7d07a63e8b30-ovnkube-script-lib\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.738953 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f0d6d221-31f9-42f3-b699-7d07a63e8b30-ovn-node-metrics-cert\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.755157 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj4gz\" (UniqueName: \"kubernetes.io/projected/f0d6d221-31f9-42f3-b699-7d07a63e8b30-kube-api-access-tj4gz\") pod \"ovnkube-node-t5nsh\" (UID: \"f0d6d221-31f9-42f3-b699-7d07a63e8b30\") " pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.802874 4798 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovnkube-controller/3.log" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.808664 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovn-acl-logging/0.log" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.809993 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-svt4z_3c2a342b-5252-4957-9e2c-8f81c304b8af/ovn-controller/0.log" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.811302 4798 generic.go:334] "Generic (PLEG): container finished" podID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerID="25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8" exitCode=0 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.811435 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.811476 4798 generic.go:334] "Generic (PLEG): container finished" podID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerID="732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48" exitCode=0 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.811510 4798 generic.go:334] "Generic (PLEG): container finished" podID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerID="dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5" exitCode=0 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.811531 4798 generic.go:334] "Generic (PLEG): container finished" podID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerID="671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2" exitCode=0 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.811550 4798 generic.go:334] "Generic (PLEG): container finished" podID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerID="5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d" exitCode=0 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.811578 4798 generic.go:334] "Generic (PLEG): container finished" podID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerID="e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc" exitCode=0 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.811600 4798 generic.go:334] "Generic (PLEG): container finished" podID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerID="337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477" exitCode=143 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.811609 4798 scope.go:117] "RemoveContainer" containerID="25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.811622 4798 generic.go:334] "Generic (PLEG): container finished" podID="3c2a342b-5252-4957-9e2c-8f81c304b8af" containerID="4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136" exitCode=143 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.811553 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.811628 4798 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.811726 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812167 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812238 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812267 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812318 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812345 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812359 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812371 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812384 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812423 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812436 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812448 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812460 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812476 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812496 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812510 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812522 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812535 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812547 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812559 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812570 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812582 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812596 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812608 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812623 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812642 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812655 4798 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812666 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812678 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812689 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812700 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812711 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812723 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812735 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812748 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812763 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-svt4z" event={"ID":"3c2a342b-5252-4957-9e2c-8f81c304b8af","Type":"ContainerDied","Data":"276a12e25ec2902ce6c28d1df642a186aaae40e269e8a02bfc45420aa26e18c7"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812782 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812797 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812809 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812820 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812831 4798 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812842 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812853 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812863 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812876 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.812887 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.818688 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-p6xdd_bd9c0e44-3329-422a-907b-e9e9bb6194cc/kube-multus/2.log" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.819976 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-p6xdd_bd9c0e44-3329-422a-907b-e9e9bb6194cc/kube-multus/1.log" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.820052 4798 generic.go:334] "Generic (PLEG): container finished" podID="bd9c0e44-3329-422a-907b-e9e9bb6194cc" containerID="23e0ec8a9881a6c50cee2b2d1a8a8ee8cff0b43249558238022f9310a865b8a2" exitCode=2 Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.820117 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-p6xdd" event={"ID":"bd9c0e44-3329-422a-907b-e9e9bb6194cc","Type":"ContainerDied","Data":"23e0ec8a9881a6c50cee2b2d1a8a8ee8cff0b43249558238022f9310a865b8a2"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.820206 4798 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a"} Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.821474 4798 scope.go:117] "RemoveContainer" containerID="23e0ec8a9881a6c50cee2b2d1a8a8ee8cff0b43249558238022f9310a865b8a2" Oct 11 04:04:31 crc kubenswrapper[4798]: E1011 04:04:31.821948 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-p6xdd_openshift-multus(bd9c0e44-3329-422a-907b-e9e9bb6194cc)\"" pod="openshift-multus/multus-p6xdd" podUID="bd9c0e44-3329-422a-907b-e9e9bb6194cc" Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.840921 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh"
Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.854962 4798 scope.go:117] "RemoveContainer" containerID="66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"
Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.881804 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-svt4z"]
Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.886535 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-svt4z"]
Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.886770 4798 scope.go:117] "RemoveContainer" containerID="732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"
Oct 11 04:04:31 crc kubenswrapper[4798]: W1011 04:04:31.898954 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf0d6d221_31f9_42f3_b699_7d07a63e8b30.slice/crio-84c89abf07d83c213d427cc14a54a67585a4c89552b502168da279e666c8a004 WatchSource:0}: Error finding container 84c89abf07d83c213d427cc14a54a67585a4c89552b502168da279e666c8a004: Status 404 returned error can't find the container with id 84c89abf07d83c213d427cc14a54a67585a4c89552b502168da279e666c8a004
Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.911316 4798 scope.go:117] "RemoveContainer" containerID="dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"
Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.943917 4798 scope.go:117] "RemoveContainer" containerID="671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2"
Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.965333 4798 scope.go:117] "RemoveContainer" containerID="5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d"
Oct 11 04:04:31 crc kubenswrapper[4798]: I1011 04:04:31.989965 4798 scope.go:117] "RemoveContainer" containerID="e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.027127 4798 scope.go:117] "RemoveContainer" containerID="337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.055648 4798 scope.go:117] "RemoveContainer" containerID="4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.084058 4798 scope.go:117] "RemoveContainer" containerID="9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.149737 4798 scope.go:117] "RemoveContainer" containerID="25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"
Oct 11 04:04:32 crc kubenswrapper[4798]: E1011 04:04:32.150845 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8\": container with ID starting with 25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8 not found: ID does not exist" containerID="25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.150919 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"} err="failed to get container status \"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8\": rpc error: code = NotFound desc = could not find container \"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8\": container with ID starting with 25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8 not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.150954 4798 scope.go:117] "RemoveContainer" containerID="66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"
Oct 11 04:04:32 crc kubenswrapper[4798]: E1011 04:04:32.151687 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\": container with ID starting with 66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b not found: ID does not exist" containerID="66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.151760 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"} err="failed to get container status \"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\": rpc error: code = NotFound desc = could not find container \"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\": container with ID starting with 66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.151809 4798 scope.go:117] "RemoveContainer" containerID="732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"
Oct 11 04:04:32 crc kubenswrapper[4798]: E1011 04:04:32.152699 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\": container with ID starting with 732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48 not found: ID does not exist" containerID="732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.152816 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"} err="failed to get container status \"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\": rpc error: code = NotFound desc = could not find container \"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\": container with ID starting with 732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48 not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.152889 4798 scope.go:117] "RemoveContainer" containerID="dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"
Oct 11 04:04:32 crc kubenswrapper[4798]: E1011 04:04:32.153386 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\": container with ID starting with dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5 not found: ID does not exist" containerID="dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.153434 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"} err="failed to get container status \"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\": rpc error: code = NotFound desc = could not find container \"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\": container with ID starting with dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5 not found: ID does not exist"
containerID={"Type":"cri-o","ID":"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"} err="failed to get container status \"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\": rpc error: code = NotFound desc = could not find container \"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\": container with ID starting with dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.153452 4798 scope.go:117] "RemoveContainer" containerID="671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2" Oct 11 04:04:32 crc kubenswrapper[4798]: E1011 04:04:32.153807 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\": container with ID starting with 671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2 not found: ID does not exist" containerID="671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.153834 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2"} err="failed to get container status \"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\": rpc error: code = NotFound desc = could not find container \"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\": container with ID starting with 671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.153849 4798 scope.go:117] "RemoveContainer" containerID="5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d" Oct 11 04:04:32 crc kubenswrapper[4798]: E1011 04:04:32.154438 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\": container with ID starting with 5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d not found: ID does not exist" containerID="5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.154492 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d"} err="failed to get container status \"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\": rpc error: code = NotFound desc = could not find container \"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\": container with ID starting with 5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.154524 4798 scope.go:117] "RemoveContainer" containerID="e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc" Oct 11 04:04:32 crc kubenswrapper[4798]: E1011 04:04:32.155413 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\": container with ID starting with e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc not found: ID does not exist" 
containerID="e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.155445 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc"} err="failed to get container status \"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\": rpc error: code = NotFound desc = could not find container \"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\": container with ID starting with e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.155465 4798 scope.go:117] "RemoveContainer" containerID="337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477" Oct 11 04:04:32 crc kubenswrapper[4798]: E1011 04:04:32.156194 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\": container with ID starting with 337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477 not found: ID does not exist" containerID="337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.156290 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477"} err="failed to get container status \"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\": rpc error: code = NotFound desc = could not find container \"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\": container with ID starting with 337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.156346 4798 scope.go:117] "RemoveContainer" containerID="4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136" Oct 11 04:04:32 crc kubenswrapper[4798]: E1011 04:04:32.156776 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\": container with ID starting with 4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136 not found: ID does not exist" containerID="4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.156802 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136"} err="failed to get container status \"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\": rpc error: code = NotFound desc = could not find container \"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\": container with ID starting with 4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.156820 4798 scope.go:117] "RemoveContainer" containerID="9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7" Oct 11 04:04:32 crc kubenswrapper[4798]: E1011 04:04:32.157162 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\": container with ID starting with 9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7 not found: ID does not exist" containerID="9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.157213 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7"} err="failed to get container status \"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\": rpc error: code = NotFound desc = could not find container \"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\": container with ID starting with 9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.157250 4798 scope.go:117] "RemoveContainer" containerID="25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.157616 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"} err="failed to get container status \"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8\": rpc error: code = NotFound desc = could not find container \"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8\": container with ID starting with 25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.157665 4798 scope.go:117] "RemoveContainer" containerID="66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.158194 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"} err="failed to get container status \"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\": rpc error: code = NotFound desc = could not find container \"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\": container with ID starting with 66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.158221 4798 scope.go:117] "RemoveContainer" containerID="732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.158856 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"} err="failed to get container status \"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\": rpc error: code = NotFound desc = could not find container \"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\": container with ID starting with 732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.158879 4798 scope.go:117] "RemoveContainer" containerID="dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.159235 4798 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"} err="failed to get container status \"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\": rpc error: code = NotFound desc = could not find container \"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\": container with ID starting with dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.159279 4798 scope.go:117] "RemoveContainer" containerID="671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.159693 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2"} err="failed to get container status \"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\": rpc error: code = NotFound desc = could not find container \"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\": container with ID starting with 671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.159726 4798 scope.go:117] "RemoveContainer" containerID="5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.160062 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d"} err="failed to get container status \"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\": rpc error: code = NotFound desc = could not find container \"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\": container with ID starting with 5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.160112 4798 scope.go:117] "RemoveContainer" containerID="e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.160381 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc"} err="failed to get container status \"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\": rpc error: code = NotFound desc = could not find container \"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\": container with ID starting with e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.160440 4798 scope.go:117] "RemoveContainer" containerID="337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.160832 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477"} err="failed to get container status \"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\": rpc error: code = NotFound desc = could not find container \"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\": container with ID starting with 337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477 not found: ID does not exist" Oct 
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.160875 4798 scope.go:117] "RemoveContainer" containerID="4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.161182 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136"} err="failed to get container status \"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\": rpc error: code = NotFound desc = could not find container \"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\": container with ID starting with 4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136 not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.161234 4798 scope.go:117] "RemoveContainer" containerID="9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.161684 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7"} err="failed to get container status \"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\": rpc error: code = NotFound desc = could not find container \"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\": container with ID starting with 9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7 not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.161713 4798 scope.go:117] "RemoveContainer" containerID="25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.162051 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"} err="failed to get container status \"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8\": rpc error: code = NotFound desc = could not find container \"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8\": container with ID starting with 25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8 not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.162075 4798 scope.go:117] "RemoveContainer" containerID="66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.164763 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"} err="failed to get container status \"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\": rpc error: code = NotFound desc = could not find container \"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\": container with ID starting with 66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.164819 4798 scope.go:117] "RemoveContainer" containerID="732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.165207 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"} err="failed to get container status \"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\": rpc error: code = NotFound desc = could not find container \"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\": container with ID starting with 732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48 not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.165237 4798 scope.go:117] "RemoveContainer" containerID="dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.165604 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"} err="failed to get container status \"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\": rpc error: code = NotFound desc = could not find container \"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\": container with ID starting with dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5 not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.165655 4798 scope.go:117] "RemoveContainer" containerID="671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.165963 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2"} err="failed to get container status \"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\": rpc error: code = NotFound desc = could not find container \"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\": container with ID starting with 671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2 not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.165997 4798 scope.go:117] "RemoveContainer" containerID="5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.166246 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d"} err="failed to get container status \"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\": rpc error: code = NotFound desc = could not find container \"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\": container with ID starting with 5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.166276 4798 scope.go:117] "RemoveContainer" containerID="e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.166530 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc"} err="failed to get container status \"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\": rpc error: code = NotFound desc = could not find container \"e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc\": container with ID starting with e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.166553 4798 scope.go:117] "RemoveContainer" containerID="337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477"
containerID="337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.166769 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477"} err="failed to get container status \"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\": rpc error: code = NotFound desc = could not find container \"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\": container with ID starting with 337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.166789 4798 scope.go:117] "RemoveContainer" containerID="4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.167043 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136"} err="failed to get container status \"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\": rpc error: code = NotFound desc = could not find container \"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\": container with ID starting with 4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.167064 4798 scope.go:117] "RemoveContainer" containerID="9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.167233 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7"} err="failed to get container status \"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\": rpc error: code = NotFound desc = could not find container \"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\": container with ID starting with 9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.167258 4798 scope.go:117] "RemoveContainer" containerID="25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.167457 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"} err="failed to get container status \"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8\": rpc error: code = NotFound desc = could not find container \"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8\": container with ID starting with 25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.167481 4798 scope.go:117] "RemoveContainer" containerID="66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.167677 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b"} err="failed to get container status \"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\": rpc error: code = NotFound desc = could not find 
container \"66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b\": container with ID starting with 66e2aa76f109bbdb0ec131c0724c460f5c1313b096e2fb3ebf1e42fd4832c88b not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.167697 4798 scope.go:117] "RemoveContainer" containerID="732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.167861 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48"} err="failed to get container status \"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\": rpc error: code = NotFound desc = could not find container \"732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48\": container with ID starting with 732433bbe785758ed4cbedf667608c4780a5d1e7e2eafd432735f0d74ea01d48 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.167878 4798 scope.go:117] "RemoveContainer" containerID="dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.168037 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5"} err="failed to get container status \"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\": rpc error: code = NotFound desc = could not find container \"dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5\": container with ID starting with dd63d88fe2ebe6035765ee3f3c05c2ef747f9b2869da239825c08667935c9fb5 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.168055 4798 scope.go:117] "RemoveContainer" containerID="671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.168218 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2"} err="failed to get container status \"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\": rpc error: code = NotFound desc = could not find container \"671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2\": container with ID starting with 671945e25068ff79ab8574dd96b2d81646b8240da41f34a10aa857f0c2520df2 not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.168238 4798 scope.go:117] "RemoveContainer" containerID="5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.168655 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d"} err="failed to get container status \"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\": rpc error: code = NotFound desc = could not find container \"5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d\": container with ID starting with 5667e3d7288833ff8c576f64f111c3a2f59619a3577f969d0afdd633962bd30d not found: ID does not exist" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.168748 4798 scope.go:117] "RemoveContainer" containerID="e5a66de04a274f07f090f66d9da727e1985f9c171038de366b409a0300d1b6cc" Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.169151 4798 
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.169174 4798 scope.go:117] "RemoveContainer" containerID="337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.169425 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477"} err="failed to get container status \"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\": rpc error: code = NotFound desc = could not find container \"337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477\": container with ID starting with 337fb496a29d8027f6400b57b1625b68029a16e368139f753b9b94df22448477 not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.169449 4798 scope.go:117] "RemoveContainer" containerID="4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.169671 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136"} err="failed to get container status \"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\": rpc error: code = NotFound desc = could not find container \"4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136\": container with ID starting with 4c9529e6c53d42e111799753c41ef620b382b2a2b1991aa71d4ee846029a5136 not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.169762 4798 scope.go:117] "RemoveContainer" containerID="9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.170059 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7"} err="failed to get container status \"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\": rpc error: code = NotFound desc = could not find container \"9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7\": container with ID starting with 9f8c139ffa0a5dc24b40f6d5bcc2fa9422f58b275692d3ce3ba5a928c1e454a7 not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.170085 4798 scope.go:117] "RemoveContainer" containerID="25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.170420 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8"} err="failed to get container status \"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8\": rpc error: code = NotFound desc = could not find container \"25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8\": container with ID starting with 25f75915fce6470de042b47f0a1f09f1debcb13ab242d5e2a6bd64c4626c72e8 not found: ID does not exist"
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.830880 4798 generic.go:334] "Generic (PLEG): container finished" podID="f0d6d221-31f9-42f3-b699-7d07a63e8b30" containerID="84606b3ce9026d911aff2f62e391e7e67a4529af4504d8595b7375738f6bc001" exitCode=0
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.830937 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" event={"ID":"f0d6d221-31f9-42f3-b699-7d07a63e8b30","Type":"ContainerDied","Data":"84606b3ce9026d911aff2f62e391e7e67a4529af4504d8595b7375738f6bc001"}
Oct 11 04:04:32 crc kubenswrapper[4798]: I1011 04:04:32.830984 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" event={"ID":"f0d6d221-31f9-42f3-b699-7d07a63e8b30","Type":"ContainerStarted","Data":"84c89abf07d83c213d427cc14a54a67585a4c89552b502168da279e666c8a004"}
Oct 11 04:04:33 crc kubenswrapper[4798]: I1011 04:04:33.431976 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c2a342b-5252-4957-9e2c-8f81c304b8af" path="/var/lib/kubelet/pods/3c2a342b-5252-4957-9e2c-8f81c304b8af/volumes"
Oct 11 04:04:33 crc kubenswrapper[4798]: I1011 04:04:33.842466 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" event={"ID":"f0d6d221-31f9-42f3-b699-7d07a63e8b30","Type":"ContainerStarted","Data":"d35fb4eded2d75bb98e77959e13b1cca33c3864e1121dd1454899960cce61ecf"}
Oct 11 04:04:33 crc kubenswrapper[4798]: I1011 04:04:33.842932 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" event={"ID":"f0d6d221-31f9-42f3-b699-7d07a63e8b30","Type":"ContainerStarted","Data":"7990d61e827fb7eaaf5e2d18153655fdbaf960aab9fbd49c3557e7c4d43e9b63"}
Oct 11 04:04:33 crc kubenswrapper[4798]: I1011 04:04:33.842951 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" event={"ID":"f0d6d221-31f9-42f3-b699-7d07a63e8b30","Type":"ContainerStarted","Data":"a35ac08c2cc73f54c74d3edaa05761d5d0cd26b129bb4d976e20b1eb82064c8d"}
Oct 11 04:04:33 crc kubenswrapper[4798]: I1011 04:04:33.842965 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" event={"ID":"f0d6d221-31f9-42f3-b699-7d07a63e8b30","Type":"ContainerStarted","Data":"85435d74e26d504501ea4b6f6441b586c0906a904128648582f91744197fc32b"}
Oct 11 04:04:33 crc kubenswrapper[4798]: I1011 04:04:33.842978 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" event={"ID":"f0d6d221-31f9-42f3-b699-7d07a63e8b30","Type":"ContainerStarted","Data":"70a31cb5dd8a4767f61066181445e22063416ed4abd2f0b153d44d32a14b3299"}
Oct 11 04:04:33 crc kubenswrapper[4798]: I1011 04:04:33.842991 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" event={"ID":"f0d6d221-31f9-42f3-b699-7d07a63e8b30","Type":"ContainerStarted","Data":"e23187e37e8b81c279357bc210ea356c9c2e0d2d738cbba1c2ba45a3676c7298"}
Oct 11 04:04:36 crc kubenswrapper[4798]: I1011 04:04:36.870912 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" event={"ID":"f0d6d221-31f9-42f3-b699-7d07a63e8b30","Type":"ContainerStarted","Data":"17ad90375960818e617699ffbf6455d41fa028f7d30c4288ce1b5b266246a234"}
Oct 11 04:04:38 crc kubenswrapper[4798]: I1011 04:04:38.887041 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" event={"ID":"f0d6d221-31f9-42f3-b699-7d07a63e8b30","Type":"ContainerStarted","Data":"845795e41385ecf7bc163148fe23318a5176a45e349e9f403eecbbf92a60a99c"}
Oct 11 04:04:38 crc kubenswrapper[4798]: I1011 04:04:38.887422 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh"
Oct 11 04:04:38 crc kubenswrapper[4798]: I1011 04:04:38.887435 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh"
Oct 11 04:04:38 crc kubenswrapper[4798]: I1011 04:04:38.915159 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh"
Oct 11 04:04:38 crc kubenswrapper[4798]: I1011 04:04:38.917517 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh" podStartSLOduration=7.917492936 podStartE2EDuration="7.917492936s" podCreationTimestamp="2025-10-11 04:04:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:04:38.91426967 +0000 UTC m=+574.250559376" watchObservedRunningTime="2025-10-11 04:04:38.917492936 +0000 UTC m=+574.253782612"
Oct 11 04:04:39 crc kubenswrapper[4798]: I1011 04:04:39.893965 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh"
Oct 11 04:04:39 crc kubenswrapper[4798]: I1011 04:04:39.939250 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh"
Oct 11 04:04:42 crc kubenswrapper[4798]: I1011 04:04:42.423697 4798 scope.go:117] "RemoveContainer" containerID="23e0ec8a9881a6c50cee2b2d1a8a8ee8cff0b43249558238022f9310a865b8a2"
Oct 11 04:04:42 crc kubenswrapper[4798]: E1011 04:04:42.424005 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-p6xdd_openshift-multus(bd9c0e44-3329-422a-907b-e9e9bb6194cc)\"" pod="openshift-multus/multus-p6xdd" podUID="bd9c0e44-3329-422a-907b-e9e9bb6194cc"
Oct 11 04:04:55 crc kubenswrapper[4798]: I1011 04:04:55.428313 4798 scope.go:117] "RemoveContainer" containerID="23e0ec8a9881a6c50cee2b2d1a8a8ee8cff0b43249558238022f9310a865b8a2"
Oct 11 04:04:56 crc kubenswrapper[4798]: I1011 04:04:55.999231 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-p6xdd_bd9c0e44-3329-422a-907b-e9e9bb6194cc/kube-multus/2.log"
Oct 11 04:04:56 crc kubenswrapper[4798]: I1011 04:04:56.000576 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-p6xdd_bd9c0e44-3329-422a-907b-e9e9bb6194cc/kube-multus/1.log"
Oct 11 04:04:56 crc kubenswrapper[4798]: I1011 04:04:56.000635 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-p6xdd" event={"ID":"bd9c0e44-3329-422a-907b-e9e9bb6194cc","Type":"ContainerStarted","Data":"7fed3c9766d2a6febd22e9d7b7a664b94dadc0f8fa5df067084fc00629ee3687"}
Oct 11 04:05:01 crc kubenswrapper[4798]: I1011 04:05:01.871764 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-t5nsh"
Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.316664 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp"]
Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.318226 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp"
Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.323243 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.326757 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp"]
Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.482086 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp\" (UID: \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp"
Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.482172 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp\" (UID: \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp"
Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.482261 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2lmr\" (UniqueName: \"kubernetes.io/projected/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-kube-api-access-j2lmr\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp\" (UID: \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp"
Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.583380 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp\" (UID: \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp"
Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.583475 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2lmr\" (UniqueName: \"kubernetes.io/projected/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-kube-api-access-j2lmr\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp\" (UID: \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp"
Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.583569 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp\" (UID: \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp"
pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp" Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.584224 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp\" (UID: \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp" Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.584449 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp\" (UID: \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp" Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.602997 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2lmr\" (UniqueName: \"kubernetes.io/projected/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-kube-api-access-j2lmr\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp\" (UID: \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp" Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.634433 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp" Oct 11 04:05:09 crc kubenswrapper[4798]: I1011 04:05:09.879480 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp"] Oct 11 04:05:09 crc kubenswrapper[4798]: W1011 04:05:09.891661 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod177b2fa2_7b2c_4483_a0c5_2c3cf6ceeed2.slice/crio-f633de598613bddce8785778040e5a2b1709e0f6169a1cf9fdcfc05d242e1d91 WatchSource:0}: Error finding container f633de598613bddce8785778040e5a2b1709e0f6169a1cf9fdcfc05d242e1d91: Status 404 returned error can't find the container with id f633de598613bddce8785778040e5a2b1709e0f6169a1cf9fdcfc05d242e1d91 Oct 11 04:05:10 crc kubenswrapper[4798]: I1011 04:05:10.088951 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp" event={"ID":"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2","Type":"ContainerStarted","Data":"76290be33b5cde753f8c9e8f2356555de8638bcf78fa22bf11476b70f844f69c"} Oct 11 04:05:10 crc kubenswrapper[4798]: I1011 04:05:10.089016 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp" event={"ID":"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2","Type":"ContainerStarted","Data":"f633de598613bddce8785778040e5a2b1709e0f6169a1cf9fdcfc05d242e1d91"} Oct 11 04:05:11 crc kubenswrapper[4798]: I1011 04:05:11.098214 4798 generic.go:334] "Generic (PLEG): container finished" podID="177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2" containerID="76290be33b5cde753f8c9e8f2356555de8638bcf78fa22bf11476b70f844f69c" exitCode=0 Oct 11 04:05:11 crc kubenswrapper[4798]: I1011 04:05:11.098309 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp" event={"ID":"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2","Type":"ContainerDied","Data":"76290be33b5cde753f8c9e8f2356555de8638bcf78fa22bf11476b70f844f69c"} Oct 11 04:05:13 crc kubenswrapper[4798]: I1011 04:05:13.111509 4798 generic.go:334] "Generic (PLEG): container finished" podID="177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2" containerID="6406575de382a2e34d4e5cf2ccb9a2bb3e73ab1db1270a5d1440228115860eff" exitCode=0 Oct 11 04:05:13 crc kubenswrapper[4798]: I1011 04:05:13.111599 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp" event={"ID":"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2","Type":"ContainerDied","Data":"6406575de382a2e34d4e5cf2ccb9a2bb3e73ab1db1270a5d1440228115860eff"} Oct 11 04:05:14 crc kubenswrapper[4798]: I1011 04:05:14.119540 4798 generic.go:334] "Generic (PLEG): container finished" podID="177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2" containerID="4f455118f69328ff384f58663050618e3b3260a2d439f1ddcf3a920b8bbda1ba" exitCode=0 Oct 11 04:05:14 crc kubenswrapper[4798]: I1011 04:05:14.119596 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp" event={"ID":"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2","Type":"ContainerDied","Data":"4f455118f69328ff384f58663050618e3b3260a2d439f1ddcf3a920b8bbda1ba"} Oct 11 04:05:15 crc kubenswrapper[4798]: I1011 04:05:15.386123 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp" Oct 11 04:05:15 crc kubenswrapper[4798]: I1011 04:05:15.574162 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-bundle\") pod \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\" (UID: \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\") " Oct 11 04:05:15 crc kubenswrapper[4798]: I1011 04:05:15.574225 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-util\") pod \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\" (UID: \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\") " Oct 11 04:05:15 crc kubenswrapper[4798]: I1011 04:05:15.574265 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2lmr\" (UniqueName: \"kubernetes.io/projected/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-kube-api-access-j2lmr\") pod \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\" (UID: \"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2\") " Oct 11 04:05:15 crc kubenswrapper[4798]: I1011 04:05:15.576738 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-bundle" (OuterVolumeSpecName: "bundle") pod "177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2" (UID: "177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:05:15 crc kubenswrapper[4798]: I1011 04:05:15.590097 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-kube-api-access-j2lmr" (OuterVolumeSpecName: "kube-api-access-j2lmr") pod "177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2" (UID: "177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2"). 
InnerVolumeSpecName "kube-api-access-j2lmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:05:15 crc kubenswrapper[4798]: I1011 04:05:15.675539 4798 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:05:15 crc kubenswrapper[4798]: I1011 04:05:15.675568 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2lmr\" (UniqueName: \"kubernetes.io/projected/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-kube-api-access-j2lmr\") on node \"crc\" DevicePath \"\"" Oct 11 04:05:16 crc kubenswrapper[4798]: I1011 04:05:16.011757 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-util" (OuterVolumeSpecName: "util") pod "177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2" (UID: "177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:05:16 crc kubenswrapper[4798]: I1011 04:05:16.081228 4798 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2-util\") on node \"crc\" DevicePath \"\"" Oct 11 04:05:16 crc kubenswrapper[4798]: I1011 04:05:16.134273 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp" event={"ID":"177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2","Type":"ContainerDied","Data":"f633de598613bddce8785778040e5a2b1709e0f6169a1cf9fdcfc05d242e1d91"} Oct 11 04:05:16 crc kubenswrapper[4798]: I1011 04:05:16.134347 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f633de598613bddce8785778040e5a2b1709e0f6169a1cf9fdcfc05d242e1d91" Oct 11 04:05:16 crc kubenswrapper[4798]: I1011 04:05:16.134370 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp" Oct 11 04:05:17 crc kubenswrapper[4798]: I1011 04:05:17.735722 4798 scope.go:117] "RemoveContainer" containerID="b5b37de1a8a4b221f421a2bf4f4706f9b2e4ce65611438e10e1bf07daa2bf25a" Oct 11 04:05:18 crc kubenswrapper[4798]: I1011 04:05:18.148991 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-p6xdd_bd9c0e44-3329-422a-907b-e9e9bb6194cc/kube-multus/2.log" Oct 11 04:05:20 crc kubenswrapper[4798]: I1011 04:05:20.869577 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-hgtkp"] Oct 11 04:05:20 crc kubenswrapper[4798]: E1011 04:05:20.869801 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2" containerName="pull" Oct 11 04:05:20 crc kubenswrapper[4798]: I1011 04:05:20.869813 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2" containerName="pull" Oct 11 04:05:20 crc kubenswrapper[4798]: E1011 04:05:20.869821 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2" containerName="util" Oct 11 04:05:20 crc kubenswrapper[4798]: I1011 04:05:20.869827 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2" containerName="util" Oct 11 04:05:20 crc kubenswrapper[4798]: E1011 04:05:20.869835 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2" containerName="extract" Oct 11 04:05:20 crc kubenswrapper[4798]: I1011 04:05:20.869841 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2" containerName="extract" Oct 11 04:05:20 crc kubenswrapper[4798]: I1011 04:05:20.869943 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2" containerName="extract" Oct 11 04:05:20 crc kubenswrapper[4798]: I1011 04:05:20.870381 4798 util.go:30] "No sandbox for pod can be found. 
Oct 11 04:05:20 crc kubenswrapper[4798]: I1011 04:05:20.875225 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-xljwx"
Oct 11 04:05:20 crc kubenswrapper[4798]: I1011 04:05:20.875606 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Oct 11 04:05:20 crc kubenswrapper[4798]: I1011 04:05:20.876456 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Oct 11 04:05:20 crc kubenswrapper[4798]: I1011 04:05:20.916997 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-hgtkp"]
Oct 11 04:05:21 crc kubenswrapper[4798]: I1011 04:05:21.049005 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4bd4\" (UniqueName: \"kubernetes.io/projected/211ee79e-e933-462d-9bf0-a287315deab3-kube-api-access-r4bd4\") pod \"nmstate-operator-858ddd8f98-hgtkp\" (UID: \"211ee79e-e933-462d-9bf0-a287315deab3\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-hgtkp"
Oct 11 04:05:21 crc kubenswrapper[4798]: I1011 04:05:21.150709 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4bd4\" (UniqueName: \"kubernetes.io/projected/211ee79e-e933-462d-9bf0-a287315deab3-kube-api-access-r4bd4\") pod \"nmstate-operator-858ddd8f98-hgtkp\" (UID: \"211ee79e-e933-462d-9bf0-a287315deab3\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-hgtkp"
Oct 11 04:05:21 crc kubenswrapper[4798]: I1011 04:05:21.180794 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4bd4\" (UniqueName: \"kubernetes.io/projected/211ee79e-e933-462d-9bf0-a287315deab3-kube-api-access-r4bd4\") pod \"nmstate-operator-858ddd8f98-hgtkp\" (UID: \"211ee79e-e933-462d-9bf0-a287315deab3\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-hgtkp"
Oct 11 04:05:21 crc kubenswrapper[4798]: I1011 04:05:21.185233 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-858ddd8f98-hgtkp"
Oct 11 04:05:21 crc kubenswrapper[4798]: I1011 04:05:21.413231 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-hgtkp"]
Oct 11 04:05:21 crc kubenswrapper[4798]: W1011 04:05:21.426555 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod211ee79e_e933_462d_9bf0_a287315deab3.slice/crio-d2a68bcfc2866e059234f2e131171adef5ab7b8e01b4f17dbe5512e8ffbd6db3 WatchSource:0}: Error finding container d2a68bcfc2866e059234f2e131171adef5ab7b8e01b4f17dbe5512e8ffbd6db3: Status 404 returned error can't find the container with id d2a68bcfc2866e059234f2e131171adef5ab7b8e01b4f17dbe5512e8ffbd6db3
Oct 11 04:05:22 crc kubenswrapper[4798]: I1011 04:05:22.175246 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-858ddd8f98-hgtkp" event={"ID":"211ee79e-e933-462d-9bf0-a287315deab3","Type":"ContainerStarted","Data":"d2a68bcfc2866e059234f2e131171adef5ab7b8e01b4f17dbe5512e8ffbd6db3"}
Oct 11 04:05:24 crc kubenswrapper[4798]: I1011 04:05:24.191710 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-858ddd8f98-hgtkp" event={"ID":"211ee79e-e933-462d-9bf0-a287315deab3","Type":"ContainerStarted","Data":"0bf345a570c256cff6c087bc0d91886f60776a811f54eccfd0821dc383ca4822"}
Oct 11 04:05:24 crc kubenswrapper[4798]: I1011 04:05:24.217639 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-858ddd8f98-hgtkp" podStartSLOduration=1.867679967 podStartE2EDuration="4.217614924s" podCreationTimestamp="2025-10-11 04:05:20 +0000 UTC" firstStartedPulling="2025-10-11 04:05:21.439163623 +0000 UTC m=+616.775453329" lastFinishedPulling="2025-10-11 04:05:23.7890986 +0000 UTC m=+619.125388286" observedRunningTime="2025-10-11 04:05:24.211517478 +0000 UTC m=+619.547807234" watchObservedRunningTime="2025-10-11 04:05:24.217614924 +0000 UTC m=+619.553904640"
Oct 11 04:05:29 crc kubenswrapper[4798]: I1011 04:05:29.868646 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-9j84j"]
Oct 11 04:05:29 crc kubenswrapper[4798]: I1011 04:05:29.870538 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-9j84j"
Oct 11 04:05:29 crc kubenswrapper[4798]: I1011 04:05:29.873210 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-hbwt8"
Oct 11 04:05:29 crc kubenswrapper[4798]: I1011 04:05:29.885878 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8"]
Oct 11 04:05:29 crc kubenswrapper[4798]: I1011 04:05:29.886585 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8"
Oct 11 04:05:29 crc kubenswrapper[4798]: I1011 04:05:29.888964 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook"
Oct 11 04:05:29 crc kubenswrapper[4798]: I1011 04:05:29.906325 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8"]
Oct 11 04:05:29 crc kubenswrapper[4798]: I1011 04:05:29.925873 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-gflt9"]
Oct 11 04:05:29 crc kubenswrapper[4798]: I1011 04:05:29.926607 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-gflt9"
Oct 11 04:05:29 crc kubenswrapper[4798]: I1011 04:05:29.940169 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-9j84j"]
Oct 11 04:05:29 crc kubenswrapper[4798]: I1011 04:05:29.995534 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c8fq\" (UniqueName: \"kubernetes.io/projected/4c1789de-d93e-434d-bbd3-d1603457248b-kube-api-access-2c8fq\") pod \"nmstate-metrics-fdff9cb8d-9j84j\" (UID: \"4c1789de-d93e-434d-bbd3-d1603457248b\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-9j84j"
Oct 11 04:05:29 crc kubenswrapper[4798]: I1011 04:05:29.995592 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/93ae23c3-aeb5-4d02-b48d-f1741886e18c-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-ptpf8\" (UID: \"93ae23c3-aeb5-4d02-b48d-f1741886e18c\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8"
Oct 11 04:05:29 crc kubenswrapper[4798]: I1011 04:05:29.995627 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqpx9\" (UniqueName: \"kubernetes.io/projected/93ae23c3-aeb5-4d02-b48d-f1741886e18c-kube-api-access-lqpx9\") pod \"nmstate-webhook-6cdbc54649-ptpf8\" (UID: \"93ae23c3-aeb5-4d02-b48d-f1741886e18c\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.025750 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb"]
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.026497 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.028363 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.028841 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.029903 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-wwhth"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.095783 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb"]
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.096541 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c8fq\" (UniqueName: \"kubernetes.io/projected/4c1789de-d93e-434d-bbd3-d1603457248b-kube-api-access-2c8fq\") pod \"nmstate-metrics-fdff9cb8d-9j84j\" (UID: \"4c1789de-d93e-434d-bbd3-d1603457248b\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-9j84j"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.096615 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/93ae23c3-aeb5-4d02-b48d-f1741886e18c-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-ptpf8\" (UID: \"93ae23c3-aeb5-4d02-b48d-f1741886e18c\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.096664 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8q6xb\" (UniqueName: \"kubernetes.io/projected/ac8be373-2f59-42fb-afb2-b4449dee5657-kube-api-access-8q6xb\") pod \"nmstate-handler-gflt9\" (UID: \"ac8be373-2f59-42fb-afb2-b4449dee5657\") " pod="openshift-nmstate/nmstate-handler-gflt9"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.096697 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqpx9\" (UniqueName: \"kubernetes.io/projected/93ae23c3-aeb5-4d02-b48d-f1741886e18c-kube-api-access-lqpx9\") pod \"nmstate-webhook-6cdbc54649-ptpf8\" (UID: \"93ae23c3-aeb5-4d02-b48d-f1741886e18c\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.096734 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/ac8be373-2f59-42fb-afb2-b4449dee5657-ovs-socket\") pod \"nmstate-handler-gflt9\" (UID: \"ac8be373-2f59-42fb-afb2-b4449dee5657\") " pod="openshift-nmstate/nmstate-handler-gflt9"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.096784 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/ac8be373-2f59-42fb-afb2-b4449dee5657-dbus-socket\") pod \"nmstate-handler-gflt9\" (UID: \"ac8be373-2f59-42fb-afb2-b4449dee5657\") " pod="openshift-nmstate/nmstate-handler-gflt9"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.096817 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/ac8be373-2f59-42fb-afb2-b4449dee5657-nmstate-lock\") pod \"nmstate-handler-gflt9\" (UID: \"ac8be373-2f59-42fb-afb2-b4449dee5657\") " pod="openshift-nmstate/nmstate-handler-gflt9"
\"ac8be373-2f59-42fb-afb2-b4449dee5657\") " pod="openshift-nmstate/nmstate-handler-gflt9" Oct 11 04:05:30 crc kubenswrapper[4798]: E1011 04:05:30.097080 4798 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Oct 11 04:05:30 crc kubenswrapper[4798]: E1011 04:05:30.097138 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93ae23c3-aeb5-4d02-b48d-f1741886e18c-tls-key-pair podName:93ae23c3-aeb5-4d02-b48d-f1741886e18c nodeName:}" failed. No retries permitted until 2025-10-11 04:05:30.597118652 +0000 UTC m=+625.933408348 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/93ae23c3-aeb5-4d02-b48d-f1741886e18c-tls-key-pair") pod "nmstate-webhook-6cdbc54649-ptpf8" (UID: "93ae23c3-aeb5-4d02-b48d-f1741886e18c") : secret "openshift-nmstate-webhook" not found Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.116023 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c8fq\" (UniqueName: \"kubernetes.io/projected/4c1789de-d93e-434d-bbd3-d1603457248b-kube-api-access-2c8fq\") pod \"nmstate-metrics-fdff9cb8d-9j84j\" (UID: \"4c1789de-d93e-434d-bbd3-d1603457248b\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-9j84j" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.117073 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqpx9\" (UniqueName: \"kubernetes.io/projected/93ae23c3-aeb5-4d02-b48d-f1741886e18c-kube-api-access-lqpx9\") pod \"nmstate-webhook-6cdbc54649-ptpf8\" (UID: \"93ae23c3-aeb5-4d02-b48d-f1741886e18c\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.188885 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-9j84j" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.198273 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/4db43a23-4466-4600-8b86-a39c6bd23319-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-dlwsb\" (UID: \"4db43a23-4466-4600-8b86-a39c6bd23319\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.198332 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/ac8be373-2f59-42fb-afb2-b4449dee5657-nmstate-lock\") pod \"nmstate-handler-gflt9\" (UID: \"ac8be373-2f59-42fb-afb2-b4449dee5657\") " pod="openshift-nmstate/nmstate-handler-gflt9" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.198410 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wb5m9\" (UniqueName: \"kubernetes.io/projected/4db43a23-4466-4600-8b86-a39c6bd23319-kube-api-access-wb5m9\") pod \"nmstate-console-plugin-6b874cbd85-dlwsb\" (UID: \"4db43a23-4466-4600-8b86-a39c6bd23319\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.198434 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/ac8be373-2f59-42fb-afb2-b4449dee5657-nmstate-lock\") pod \"nmstate-handler-gflt9\" (UID: \"ac8be373-2f59-42fb-afb2-b4449dee5657\") " pod="openshift-nmstate/nmstate-handler-gflt9" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.198446 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/4db43a23-4466-4600-8b86-a39c6bd23319-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-dlwsb\" (UID: \"4db43a23-4466-4600-8b86-a39c6bd23319\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.198526 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8q6xb\" (UniqueName: \"kubernetes.io/projected/ac8be373-2f59-42fb-afb2-b4449dee5657-kube-api-access-8q6xb\") pod \"nmstate-handler-gflt9\" (UID: \"ac8be373-2f59-42fb-afb2-b4449dee5657\") " pod="openshift-nmstate/nmstate-handler-gflt9" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.198570 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/ac8be373-2f59-42fb-afb2-b4449dee5657-ovs-socket\") pod \"nmstate-handler-gflt9\" (UID: \"ac8be373-2f59-42fb-afb2-b4449dee5657\") " pod="openshift-nmstate/nmstate-handler-gflt9" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.198590 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/ac8be373-2f59-42fb-afb2-b4449dee5657-dbus-socket\") pod \"nmstate-handler-gflt9\" (UID: \"ac8be373-2f59-42fb-afb2-b4449dee5657\") " pod="openshift-nmstate/nmstate-handler-gflt9" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.198772 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/ac8be373-2f59-42fb-afb2-b4449dee5657-ovs-socket\") pod 
\"nmstate-handler-gflt9\" (UID: \"ac8be373-2f59-42fb-afb2-b4449dee5657\") " pod="openshift-nmstate/nmstate-handler-gflt9" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.198917 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/ac8be373-2f59-42fb-afb2-b4449dee5657-dbus-socket\") pod \"nmstate-handler-gflt9\" (UID: \"ac8be373-2f59-42fb-afb2-b4449dee5657\") " pod="openshift-nmstate/nmstate-handler-gflt9" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.232200 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-59b7f8d4bf-9l4ts"] Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.240328 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.244358 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8q6xb\" (UniqueName: \"kubernetes.io/projected/ac8be373-2f59-42fb-afb2-b4449dee5657-kube-api-access-8q6xb\") pod \"nmstate-handler-gflt9\" (UID: \"ac8be373-2f59-42fb-afb2-b4449dee5657\") " pod="openshift-nmstate/nmstate-handler-gflt9" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.250996 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-gflt9" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.253561 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-59b7f8d4bf-9l4ts"] Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.305491 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/4db43a23-4466-4600-8b86-a39c6bd23319-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-dlwsb\" (UID: \"4db43a23-4466-4600-8b86-a39c6bd23319\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.305582 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wb5m9\" (UniqueName: \"kubernetes.io/projected/4db43a23-4466-4600-8b86-a39c6bd23319-kube-api-access-wb5m9\") pod \"nmstate-console-plugin-6b874cbd85-dlwsb\" (UID: \"4db43a23-4466-4600-8b86-a39c6bd23319\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.305610 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/4db43a23-4466-4600-8b86-a39c6bd23319-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-dlwsb\" (UID: \"4db43a23-4466-4600-8b86-a39c6bd23319\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb" Oct 11 04:05:30 crc kubenswrapper[4798]: E1011 04:05:30.305769 4798 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Oct 11 04:05:30 crc kubenswrapper[4798]: E1011 04:05:30.305835 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4db43a23-4466-4600-8b86-a39c6bd23319-plugin-serving-cert podName:4db43a23-4466-4600-8b86-a39c6bd23319 nodeName:}" failed. No retries permitted until 2025-10-11 04:05:30.805820001 +0000 UTC m=+626.142109677 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/4db43a23-4466-4600-8b86-a39c6bd23319-plugin-serving-cert") pod "nmstate-console-plugin-6b874cbd85-dlwsb" (UID: "4db43a23-4466-4600-8b86-a39c6bd23319") : secret "plugin-serving-cert" not found Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.307771 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/4db43a23-4466-4600-8b86-a39c6bd23319-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-dlwsb\" (UID: \"4db43a23-4466-4600-8b86-a39c6bd23319\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.333853 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wb5m9\" (UniqueName: \"kubernetes.io/projected/4db43a23-4466-4600-8b86-a39c6bd23319-kube-api-access-wb5m9\") pod \"nmstate-console-plugin-6b874cbd85-dlwsb\" (UID: \"4db43a23-4466-4600-8b86-a39c6bd23319\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.406283 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-console-serving-cert\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.406335 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-console-config\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.406353 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-console-oauth-config\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.406373 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-oauth-serving-cert\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.406583 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-service-ca\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.406661 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-trusted-ca-bundle\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " 
pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.406954 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5t9h\" (UniqueName: \"kubernetes.io/projected/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-kube-api-access-t5t9h\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.509902 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-console-serving-cert\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.510156 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-console-config\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.510564 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-console-oauth-config\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.510596 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-oauth-serving-cert\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.510627 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-service-ca\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.510660 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-trusted-ca-bundle\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.510743 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5t9h\" (UniqueName: \"kubernetes.io/projected/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-kube-api-access-t5t9h\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.511560 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-console-config\") pod \"console-59b7f8d4bf-9l4ts\" (UID: 
\"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.513077 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-service-ca\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.513771 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-oauth-serving-cert\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.514020 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-trusted-ca-bundle\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.515824 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-console-serving-cert\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.519133 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-console-oauth-config\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.529383 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5t9h\" (UniqueName: \"kubernetes.io/projected/39699106-5cb1-4509-a7e0-a0e7e0eff6ee-kube-api-access-t5t9h\") pod \"console-59b7f8d4bf-9l4ts\" (UID: \"39699106-5cb1-4509-a7e0-a0e7e0eff6ee\") " pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.578177 4798 util.go:30] "No sandbox for pod can be found. 
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.612035 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/93ae23c3-aeb5-4d02-b48d-f1741886e18c-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-ptpf8\" (UID: \"93ae23c3-aeb5-4d02-b48d-f1741886e18c\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.616909 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/93ae23c3-aeb5-4d02-b48d-f1741886e18c-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-ptpf8\" (UID: \"93ae23c3-aeb5-4d02-b48d-f1741886e18c\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.632636 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-9j84j"]
Oct 11 04:05:30 crc kubenswrapper[4798]: W1011 04:05:30.651919 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4c1789de_d93e_434d_bbd3_d1603457248b.slice/crio-b0f6df54f4d8be6caf9b24b938d157a7842a81399769b5c9d73c52aac5009fd3 WatchSource:0}: Error finding container b0f6df54f4d8be6caf9b24b938d157a7842a81399769b5c9d73c52aac5009fd3: Status 404 returned error can't find the container with id b0f6df54f4d8be6caf9b24b938d157a7842a81399769b5c9d73c52aac5009fd3
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.801690 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.814315 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/4db43a23-4466-4600-8b86-a39c6bd23319-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-dlwsb\" (UID: \"4db43a23-4466-4600-8b86-a39c6bd23319\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.818656 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/4db43a23-4466-4600-8b86-a39c6bd23319-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-dlwsb\" (UID: \"4db43a23-4466-4600-8b86-a39c6bd23319\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb"
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.831235 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-59b7f8d4bf-9l4ts"]
Oct 11 04:05:30 crc kubenswrapper[4798]: W1011 04:05:30.838455 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39699106_5cb1_4509_a7e0_a0e7e0eff6ee.slice/crio-6de42f2e4970610eabe300d17417f486d2102a7a45b10176b12bb67e727f1825 WatchSource:0}: Error finding container 6de42f2e4970610eabe300d17417f486d2102a7a45b10176b12bb67e727f1825: Status 404 returned error can't find the container with id 6de42f2e4970610eabe300d17417f486d2102a7a45b10176b12bb67e727f1825
Oct 11 04:05:30 crc kubenswrapper[4798]: I1011 04:05:30.973454 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb"
Oct 11 04:05:31 crc kubenswrapper[4798]: I1011 04:05:31.025267 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8"]
Oct 11 04:05:31 crc kubenswrapper[4798]: W1011 04:05:31.046704 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod93ae23c3_aeb5_4d02_b48d_f1741886e18c.slice/crio-de6f44cb17921bf6bd35edf9aeaa85bd1136f8bb7e54da05ab03787e21111b4a WatchSource:0}: Error finding container de6f44cb17921bf6bd35edf9aeaa85bd1136f8bb7e54da05ab03787e21111b4a: Status 404 returned error can't find the container with id de6f44cb17921bf6bd35edf9aeaa85bd1136f8bb7e54da05ab03787e21111b4a
Oct 11 04:05:31 crc kubenswrapper[4798]: I1011 04:05:31.243821 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb"]
Oct 11 04:05:31 crc kubenswrapper[4798]: I1011 04:05:31.245830 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8" event={"ID":"93ae23c3-aeb5-4d02-b48d-f1741886e18c","Type":"ContainerStarted","Data":"de6f44cb17921bf6bd35edf9aeaa85bd1136f8bb7e54da05ab03787e21111b4a"}
Oct 11 04:05:31 crc kubenswrapper[4798]: I1011 04:05:31.246765 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-gflt9" event={"ID":"ac8be373-2f59-42fb-afb2-b4449dee5657","Type":"ContainerStarted","Data":"0047feea139ef03f662a1903ea65c40adf2c213fe292982492c4de400e83a4cc"}
Oct 11 04:05:31 crc kubenswrapper[4798]: I1011 04:05:31.247640 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-9j84j" event={"ID":"4c1789de-d93e-434d-bbd3-d1603457248b","Type":"ContainerStarted","Data":"b0f6df54f4d8be6caf9b24b938d157a7842a81399769b5c9d73c52aac5009fd3"}
Oct 11 04:05:31 crc kubenswrapper[4798]: I1011 04:05:31.249043 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-59b7f8d4bf-9l4ts" event={"ID":"39699106-5cb1-4509-a7e0-a0e7e0eff6ee","Type":"ContainerStarted","Data":"b088b81e66d343ec825025e3e3e5409f7bcf91ffb4538b5ca59a351a17c570e8"}
Oct 11 04:05:31 crc kubenswrapper[4798]: I1011 04:05:31.249060 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-59b7f8d4bf-9l4ts" event={"ID":"39699106-5cb1-4509-a7e0-a0e7e0eff6ee","Type":"ContainerStarted","Data":"6de42f2e4970610eabe300d17417f486d2102a7a45b10176b12bb67e727f1825"}
Oct 11 04:05:31 crc kubenswrapper[4798]: W1011 04:05:31.251925 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4db43a23_4466_4600_8b86_a39c6bd23319.slice/crio-d759f55e7d18224c896d34a7c9884137c36de28eea6c9d06db16f845dc03ef40 WatchSource:0}: Error finding container d759f55e7d18224c896d34a7c9884137c36de28eea6c9d06db16f845dc03ef40: Status 404 returned error can't find the container with id d759f55e7d18224c896d34a7c9884137c36de28eea6c9d06db16f845dc03ef40
Oct 11 04:05:31 crc kubenswrapper[4798]: I1011 04:05:31.275590 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-59b7f8d4bf-9l4ts" podStartSLOduration=1.2755683389999999 podStartE2EDuration="1.275568339s" podCreationTimestamp="2025-10-11 04:05:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:05:31.271661135 +0000 UTC m=+626.607950841" watchObservedRunningTime="2025-10-11 04:05:31.275568339 +0000 UTC m=+626.611858025"
observedRunningTime="2025-10-11 04:05:31.271661135 +0000 UTC m=+626.607950841" watchObservedRunningTime="2025-10-11 04:05:31.275568339 +0000 UTC m=+626.611858025" Oct 11 04:05:32 crc kubenswrapper[4798]: I1011 04:05:32.258767 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb" event={"ID":"4db43a23-4466-4600-8b86-a39c6bd23319","Type":"ContainerStarted","Data":"d759f55e7d18224c896d34a7c9884137c36de28eea6c9d06db16f845dc03ef40"} Oct 11 04:05:35 crc kubenswrapper[4798]: I1011 04:05:35.280751 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-9j84j" event={"ID":"4c1789de-d93e-434d-bbd3-d1603457248b","Type":"ContainerStarted","Data":"0b07af08ced9926cf0f31eaede4e69a03cb2ddec369fff32f475917f3c98a87a"} Oct 11 04:05:35 crc kubenswrapper[4798]: I1011 04:05:35.282009 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb" event={"ID":"4db43a23-4466-4600-8b86-a39c6bd23319","Type":"ContainerStarted","Data":"db486160d84b5ba7854bdff8ed761708b3397ce4c5f115313eafa29909c7cfb9"} Oct 11 04:05:35 crc kubenswrapper[4798]: I1011 04:05:35.283023 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8" event={"ID":"93ae23c3-aeb5-4d02-b48d-f1741886e18c","Type":"ContainerStarted","Data":"332ba00dc053014fc661485ef3ba20a9bc169e176df120dceec7341da2160af9"} Oct 11 04:05:35 crc kubenswrapper[4798]: I1011 04:05:35.283157 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8" Oct 11 04:05:35 crc kubenswrapper[4798]: I1011 04:05:35.285935 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-gflt9" event={"ID":"ac8be373-2f59-42fb-afb2-b4449dee5657","Type":"ContainerStarted","Data":"7b1e5ece7277f0961e9d7ac598785326147280e49ea1edd7fa95b9794864f512"} Oct 11 04:05:35 crc kubenswrapper[4798]: I1011 04:05:35.286155 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-gflt9" Oct 11 04:05:35 crc kubenswrapper[4798]: I1011 04:05:35.308120 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-dlwsb" podStartSLOduration=2.176002297 podStartE2EDuration="5.308098508s" podCreationTimestamp="2025-10-11 04:05:30 +0000 UTC" firstStartedPulling="2025-10-11 04:05:31.255736534 +0000 UTC m=+626.592026220" lastFinishedPulling="2025-10-11 04:05:34.387832705 +0000 UTC m=+629.724122431" observedRunningTime="2025-10-11 04:05:35.297428462 +0000 UTC m=+630.633718198" watchObservedRunningTime="2025-10-11 04:05:35.308098508 +0000 UTC m=+630.644388194" Oct 11 04:05:35 crc kubenswrapper[4798]: I1011 04:05:35.323514 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8" podStartSLOduration=2.951344406 podStartE2EDuration="6.323496116s" podCreationTimestamp="2025-10-11 04:05:29 +0000 UTC" firstStartedPulling="2025-10-11 04:05:31.048224804 +0000 UTC m=+626.384514490" lastFinishedPulling="2025-10-11 04:05:34.420376514 +0000 UTC m=+629.756666200" observedRunningTime="2025-10-11 04:05:35.317664927 +0000 UTC m=+630.653954633" watchObservedRunningTime="2025-10-11 04:05:35.323496116 +0000 UTC m=+630.659785802" Oct 11 04:05:35 crc kubenswrapper[4798]: I1011 04:05:35.344030 4798 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-gflt9" podStartSLOduration=2.242776174 podStartE2EDuration="6.344009108s" podCreationTimestamp="2025-10-11 04:05:29 +0000 UTC" firstStartedPulling="2025-10-11 04:05:30.285960645 +0000 UTC m=+625.622250341" lastFinishedPulling="2025-10-11 04:05:34.387193589 +0000 UTC m=+629.723483275" observedRunningTime="2025-10-11 04:05:35.335527545 +0000 UTC m=+630.671817251" watchObservedRunningTime="2025-10-11 04:05:35.344009108 +0000 UTC m=+630.680298794" Oct 11 04:05:37 crc kubenswrapper[4798]: I1011 04:05:37.310042 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-9j84j" event={"ID":"4c1789de-d93e-434d-bbd3-d1603457248b","Type":"ContainerStarted","Data":"c58617396f52325fe40685c69be942592a8ca2ec1b71773c20f872b0013be194"} Oct 11 04:05:37 crc kubenswrapper[4798]: I1011 04:05:37.336124 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-9j84j" podStartSLOduration=2.110958297 podStartE2EDuration="8.336099944s" podCreationTimestamp="2025-10-11 04:05:29 +0000 UTC" firstStartedPulling="2025-10-11 04:05:30.657791932 +0000 UTC m=+625.994081618" lastFinishedPulling="2025-10-11 04:05:36.882933579 +0000 UTC m=+632.219223265" observedRunningTime="2025-10-11 04:05:37.335832627 +0000 UTC m=+632.672122303" watchObservedRunningTime="2025-10-11 04:05:37.336099944 +0000 UTC m=+632.672389640" Oct 11 04:05:40 crc kubenswrapper[4798]: I1011 04:05:40.281599 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-gflt9" Oct 11 04:05:40 crc kubenswrapper[4798]: I1011 04:05:40.578541 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:40 crc kubenswrapper[4798]: I1011 04:05:40.578645 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:40 crc kubenswrapper[4798]: I1011 04:05:40.585339 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:41 crc kubenswrapper[4798]: I1011 04:05:41.362903 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-59b7f8d4bf-9l4ts" Oct 11 04:05:41 crc kubenswrapper[4798]: I1011 04:05:41.447984 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5lpj9"] Oct 11 04:05:50 crc kubenswrapper[4798]: I1011 04:05:50.810514 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-ptpf8" Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.149209 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k"] Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.150832 4798 util.go:30] "No sandbox for pod can be found. 
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.154281 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.165192 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k"]
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.189169 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k\" (UID: \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k"
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.189465 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k\" (UID: \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k"
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.189587 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m57zm\" (UniqueName: \"kubernetes.io/projected/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-kube-api-access-m57zm\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k\" (UID: \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k"
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.290767 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k\" (UID: \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k"
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.290836 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m57zm\" (UniqueName: \"kubernetes.io/projected/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-kube-api-access-m57zm\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k\" (UID: \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k"
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.290869 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k\" (UID: \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k"
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.291328 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k\" (UID: \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k"
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.291376 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k\" (UID: \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k"
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.313640 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m57zm\" (UniqueName: \"kubernetes.io/projected/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-kube-api-access-m57zm\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k\" (UID: \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k"
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.468878 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.477619 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k"
Oct 11 04:06:05 crc kubenswrapper[4798]: I1011 04:06:05.879075 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k"]
Oct 11 04:06:05 crc kubenswrapper[4798]: W1011 04:06:05.886627 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc25b8b69_843d_4c9e_80f8_0e41b7f8aec3.slice/crio-de7b41e9edf3b37674a616266930fb058b8a9623e4a01f87c7d44273896c6854 WatchSource:0}: Error finding container de7b41e9edf3b37674a616266930fb058b8a9623e4a01f87c7d44273896c6854: Status 404 returned error can't find the container with id de7b41e9edf3b37674a616266930fb058b8a9623e4a01f87c7d44273896c6854
Oct 11 04:06:06 crc kubenswrapper[4798]: I1011 04:06:06.543344 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-5lpj9" podUID="79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" containerName="console" containerID="cri-o://fc7e27c5c34b193a1868b23b9ec01cb75ad09036a180fff1fe437da649f98ba6" gracePeriod=15
Oct 11 04:06:06 crc kubenswrapper[4798]: I1011 04:06:06.553891 4798 generic.go:334] "Generic (PLEG): container finished" podID="c25b8b69-843d-4c9e-80f8-0e41b7f8aec3" containerID="584e6ee97d09935f9245c81f014f3d36c8b369338f71bd2ece8301e527923773" exitCode=0
Oct 11 04:06:06 crc kubenswrapper[4798]: I1011 04:06:06.553950 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k" event={"ID":"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3","Type":"ContainerDied","Data":"584e6ee97d09935f9245c81f014f3d36c8b369338f71bd2ece8301e527923773"}
Oct 11 04:06:06 crc kubenswrapper[4798]: I1011 04:06:06.554029 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k" event={"ID":"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3","Type":"ContainerStarted","Data":"de7b41e9edf3b37674a616266930fb058b8a9623e4a01f87c7d44273896c6854"}
event={"ID":"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3","Type":"ContainerStarted","Data":"de7b41e9edf3b37674a616266930fb058b8a9623e4a01f87c7d44273896c6854"} Oct 11 04:06:06 crc kubenswrapper[4798]: I1011 04:06:06.663016 4798 patch_prober.go:28] interesting pod/console-f9d7485db-5lpj9 container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Oct 11 04:06:06 crc kubenswrapper[4798]: I1011 04:06:06.663071 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-f9d7485db-5lpj9" podUID="79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" Oct 11 04:06:06 crc kubenswrapper[4798]: I1011 04:06:06.932206 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5lpj9_79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a/console/0.log" Oct 11 04:06:06 crc kubenswrapper[4798]: I1011 04:06:06.932738 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.055491 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-service-ca\") pod \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.055569 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-oauth-config\") pod \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.055662 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-config\") pod \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.055698 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pjjx\" (UniqueName: \"kubernetes.io/projected/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-kube-api-access-4pjjx\") pod \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.055727 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-trusted-ca-bundle\") pod \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.055768 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-oauth-serving-cert\") pod \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.055809 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-serving-cert\") pod \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\" (UID: \"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a\") " Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.057089 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-config" (OuterVolumeSpecName: "console-config") pod "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" (UID: "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.057132 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" (UID: "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.057709 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" (UID: "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.057948 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-service-ca" (OuterVolumeSpecName: "service-ca") pod "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" (UID: "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.064187 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" (UID: "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.064552 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-kube-api-access-4pjjx" (OuterVolumeSpecName: "kube-api-access-4pjjx") pod "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" (UID: "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a"). InnerVolumeSpecName "kube-api-access-4pjjx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.064900 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" (UID: "79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.157956 4798 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.157993 4798 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.158006 4798 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.158018 4798 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-service-ca\") on node \"crc\" DevicePath \"\"" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.158029 4798 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-oauth-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.158040 4798 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-console-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.158053 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pjjx\" (UniqueName: \"kubernetes.io/projected/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a-kube-api-access-4pjjx\") on node \"crc\" DevicePath \"\"" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.567116 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5lpj9_79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a/console/0.log" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.567180 4798 generic.go:334] "Generic (PLEG): container finished" podID="79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" containerID="fc7e27c5c34b193a1868b23b9ec01cb75ad09036a180fff1fe437da649f98ba6" exitCode=2 Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.567232 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5lpj9" event={"ID":"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a","Type":"ContainerDied","Data":"fc7e27c5c34b193a1868b23b9ec01cb75ad09036a180fff1fe437da649f98ba6"} Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.567279 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5lpj9" event={"ID":"79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a","Type":"ContainerDied","Data":"3662d386db9616cb0b579dfa8696f618885216078f390a259ef21f8c31d5f5a4"} Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.567287 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-5lpj9" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.567313 4798 scope.go:117] "RemoveContainer" containerID="fc7e27c5c34b193a1868b23b9ec01cb75ad09036a180fff1fe437da649f98ba6" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.595209 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5lpj9"] Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.599710 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-5lpj9"] Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.619020 4798 scope.go:117] "RemoveContainer" containerID="fc7e27c5c34b193a1868b23b9ec01cb75ad09036a180fff1fe437da649f98ba6" Oct 11 04:06:07 crc kubenswrapper[4798]: E1011 04:06:07.619782 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc7e27c5c34b193a1868b23b9ec01cb75ad09036a180fff1fe437da649f98ba6\": container with ID starting with fc7e27c5c34b193a1868b23b9ec01cb75ad09036a180fff1fe437da649f98ba6 not found: ID does not exist" containerID="fc7e27c5c34b193a1868b23b9ec01cb75ad09036a180fff1fe437da649f98ba6" Oct 11 04:06:07 crc kubenswrapper[4798]: I1011 04:06:07.619838 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc7e27c5c34b193a1868b23b9ec01cb75ad09036a180fff1fe437da649f98ba6"} err="failed to get container status \"fc7e27c5c34b193a1868b23b9ec01cb75ad09036a180fff1fe437da649f98ba6\": rpc error: code = NotFound desc = could not find container \"fc7e27c5c34b193a1868b23b9ec01cb75ad09036a180fff1fe437da649f98ba6\": container with ID starting with fc7e27c5c34b193a1868b23b9ec01cb75ad09036a180fff1fe437da649f98ba6 not found: ID does not exist" Oct 11 04:06:08 crc kubenswrapper[4798]: I1011 04:06:08.577875 4798 generic.go:334] "Generic (PLEG): container finished" podID="c25b8b69-843d-4c9e-80f8-0e41b7f8aec3" containerID="d7139054d269489195734db673e42a42d9c9946786e9ff5e9d4f8a02ee1b7891" exitCode=0 Oct 11 04:06:08 crc kubenswrapper[4798]: I1011 04:06:08.578114 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k" event={"ID":"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3","Type":"ContainerDied","Data":"d7139054d269489195734db673e42a42d9c9946786e9ff5e9d4f8a02ee1b7891"} Oct 11 04:06:09 crc kubenswrapper[4798]: I1011 04:06:09.431603 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" path="/var/lib/kubelet/pods/79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a/volumes" Oct 11 04:06:09 crc kubenswrapper[4798]: I1011 04:06:09.592568 4798 generic.go:334] "Generic (PLEG): container finished" podID="c25b8b69-843d-4c9e-80f8-0e41b7f8aec3" containerID="8e9efdf9a726f386463c8fc7c0284f4903d0b5689ece099309b6a4570132f1ca" exitCode=0 Oct 11 04:06:09 crc kubenswrapper[4798]: I1011 04:06:09.592622 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k" event={"ID":"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3","Type":"ContainerDied","Data":"8e9efdf9a726f386463c8fc7c0284f4903d0b5689ece099309b6a4570132f1ca"} Oct 11 04:06:10 crc kubenswrapper[4798]: I1011 04:06:10.931744 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k" Oct 11 04:06:11 crc kubenswrapper[4798]: I1011 04:06:11.016487 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-util\") pod \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\" (UID: \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\") " Oct 11 04:06:11 crc kubenswrapper[4798]: I1011 04:06:11.016725 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m57zm\" (UniqueName: \"kubernetes.io/projected/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-kube-api-access-m57zm\") pod \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\" (UID: \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\") " Oct 11 04:06:11 crc kubenswrapper[4798]: I1011 04:06:11.016861 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-bundle\") pod \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\" (UID: \"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3\") " Oct 11 04:06:11 crc kubenswrapper[4798]: I1011 04:06:11.018918 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-bundle" (OuterVolumeSpecName: "bundle") pod "c25b8b69-843d-4c9e-80f8-0e41b7f8aec3" (UID: "c25b8b69-843d-4c9e-80f8-0e41b7f8aec3"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:06:11 crc kubenswrapper[4798]: I1011 04:06:11.025242 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-kube-api-access-m57zm" (OuterVolumeSpecName: "kube-api-access-m57zm") pod "c25b8b69-843d-4c9e-80f8-0e41b7f8aec3" (UID: "c25b8b69-843d-4c9e-80f8-0e41b7f8aec3"). InnerVolumeSpecName "kube-api-access-m57zm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:06:11 crc kubenswrapper[4798]: I1011 04:06:11.035176 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-util" (OuterVolumeSpecName: "util") pod "c25b8b69-843d-4c9e-80f8-0e41b7f8aec3" (UID: "c25b8b69-843d-4c9e-80f8-0e41b7f8aec3"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:06:11 crc kubenswrapper[4798]: I1011 04:06:11.118882 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m57zm\" (UniqueName: \"kubernetes.io/projected/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-kube-api-access-m57zm\") on node \"crc\" DevicePath \"\"" Oct 11 04:06:11 crc kubenswrapper[4798]: I1011 04:06:11.118956 4798 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:06:11 crc kubenswrapper[4798]: I1011 04:06:11.118978 4798 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c25b8b69-843d-4c9e-80f8-0e41b7f8aec3-util\") on node \"crc\" DevicePath \"\"" Oct 11 04:06:11 crc kubenswrapper[4798]: I1011 04:06:11.610294 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k" event={"ID":"c25b8b69-843d-4c9e-80f8-0e41b7f8aec3","Type":"ContainerDied","Data":"de7b41e9edf3b37674a616266930fb058b8a9623e4a01f87c7d44273896c6854"} Oct 11 04:06:11 crc kubenswrapper[4798]: I1011 04:06:11.610804 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de7b41e9edf3b37674a616266930fb058b8a9623e4a01f87c7d44273896c6854" Oct 11 04:06:11 crc kubenswrapper[4798]: I1011 04:06:11.610628 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.073042 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8"] Oct 11 04:06:21 crc kubenswrapper[4798]: E1011 04:06:21.074171 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c25b8b69-843d-4c9e-80f8-0e41b7f8aec3" containerName="extract" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.074189 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c25b8b69-843d-4c9e-80f8-0e41b7f8aec3" containerName="extract" Oct 11 04:06:21 crc kubenswrapper[4798]: E1011 04:06:21.074222 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c25b8b69-843d-4c9e-80f8-0e41b7f8aec3" containerName="util" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.074231 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c25b8b69-843d-4c9e-80f8-0e41b7f8aec3" containerName="util" Oct 11 04:06:21 crc kubenswrapper[4798]: E1011 04:06:21.074244 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c25b8b69-843d-4c9e-80f8-0e41b7f8aec3" containerName="pull" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.074253 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c25b8b69-843d-4c9e-80f8-0e41b7f8aec3" containerName="pull" Oct 11 04:06:21 crc kubenswrapper[4798]: E1011 04:06:21.074263 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" containerName="console" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.074269 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" containerName="console" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.074458 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="79cb45d2-e74c-4bda-8464-2c2bbcdbcc4a" containerName="console" Oct 
11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.074476 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="c25b8b69-843d-4c9e-80f8-0e41b7f8aec3" containerName="extract" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.074950 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.078275 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.078763 4798 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-nhrvj" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.078947 4798 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.079062 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.081643 4798 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.099973 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8"] Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.201653 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d1544005-518c-4296-b846-c5b9ac3af4c0-webhook-cert\") pod \"metallb-operator-controller-manager-7458f849d5-nbss8\" (UID: \"d1544005-518c-4296-b846-c5b9ac3af4c0\") " pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.201997 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d1544005-518c-4296-b846-c5b9ac3af4c0-apiservice-cert\") pod \"metallb-operator-controller-manager-7458f849d5-nbss8\" (UID: \"d1544005-518c-4296-b846-c5b9ac3af4c0\") " pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.202109 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6njth\" (UniqueName: \"kubernetes.io/projected/d1544005-518c-4296-b846-c5b9ac3af4c0-kube-api-access-6njth\") pod \"metallb-operator-controller-manager-7458f849d5-nbss8\" (UID: \"d1544005-518c-4296-b846-c5b9ac3af4c0\") " pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.303731 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d1544005-518c-4296-b846-c5b9ac3af4c0-webhook-cert\") pod \"metallb-operator-controller-manager-7458f849d5-nbss8\" (UID: \"d1544005-518c-4296-b846-c5b9ac3af4c0\") " pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.303786 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/d1544005-518c-4296-b846-c5b9ac3af4c0-apiservice-cert\") pod \"metallb-operator-controller-manager-7458f849d5-nbss8\" (UID: \"d1544005-518c-4296-b846-c5b9ac3af4c0\") " pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.303838 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6njth\" (UniqueName: \"kubernetes.io/projected/d1544005-518c-4296-b846-c5b9ac3af4c0-kube-api-access-6njth\") pod \"metallb-operator-controller-manager-7458f849d5-nbss8\" (UID: \"d1544005-518c-4296-b846-c5b9ac3af4c0\") " pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.310620 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d1544005-518c-4296-b846-c5b9ac3af4c0-webhook-cert\") pod \"metallb-operator-controller-manager-7458f849d5-nbss8\" (UID: \"d1544005-518c-4296-b846-c5b9ac3af4c0\") " pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.320552 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6njth\" (UniqueName: \"kubernetes.io/projected/d1544005-518c-4296-b846-c5b9ac3af4c0-kube-api-access-6njth\") pod \"metallb-operator-controller-manager-7458f849d5-nbss8\" (UID: \"d1544005-518c-4296-b846-c5b9ac3af4c0\") " pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.326347 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d1544005-518c-4296-b846-c5b9ac3af4c0-apiservice-cert\") pod \"metallb-operator-controller-manager-7458f849d5-nbss8\" (UID: \"d1544005-518c-4296-b846-c5b9ac3af4c0\") " pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.390915 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.414801 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg"] Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.415582 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.417799 4798 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.417863 4798 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.417819 4798 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-x8xvt" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.454250 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg"] Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.610102 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dbb4b409-1678-4ae6-b584-9442c133945f-webhook-cert\") pod \"metallb-operator-webhook-server-5786d94797-8n6wg\" (UID: \"dbb4b409-1678-4ae6-b584-9442c133945f\") " pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.610170 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dbb4b409-1678-4ae6-b584-9442c133945f-apiservice-cert\") pod \"metallb-operator-webhook-server-5786d94797-8n6wg\" (UID: \"dbb4b409-1678-4ae6-b584-9442c133945f\") " pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.610350 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc7bn\" (UniqueName: \"kubernetes.io/projected/dbb4b409-1678-4ae6-b584-9442c133945f-kube-api-access-vc7bn\") pod \"metallb-operator-webhook-server-5786d94797-8n6wg\" (UID: \"dbb4b409-1678-4ae6-b584-9442c133945f\") " pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.712235 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc7bn\" (UniqueName: \"kubernetes.io/projected/dbb4b409-1678-4ae6-b584-9442c133945f-kube-api-access-vc7bn\") pod \"metallb-operator-webhook-server-5786d94797-8n6wg\" (UID: \"dbb4b409-1678-4ae6-b584-9442c133945f\") " pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.712371 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dbb4b409-1678-4ae6-b584-9442c133945f-webhook-cert\") pod \"metallb-operator-webhook-server-5786d94797-8n6wg\" (UID: \"dbb4b409-1678-4ae6-b584-9442c133945f\") " pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.712516 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dbb4b409-1678-4ae6-b584-9442c133945f-apiservice-cert\") pod \"metallb-operator-webhook-server-5786d94797-8n6wg\" (UID: \"dbb4b409-1678-4ae6-b584-9442c133945f\") " pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 
04:06:21.718644 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dbb4b409-1678-4ae6-b584-9442c133945f-webhook-cert\") pod \"metallb-operator-webhook-server-5786d94797-8n6wg\" (UID: \"dbb4b409-1678-4ae6-b584-9442c133945f\") " pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.735772 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dbb4b409-1678-4ae6-b584-9442c133945f-apiservice-cert\") pod \"metallb-operator-webhook-server-5786d94797-8n6wg\" (UID: \"dbb4b409-1678-4ae6-b584-9442c133945f\") " pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.736125 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc7bn\" (UniqueName: \"kubernetes.io/projected/dbb4b409-1678-4ae6-b584-9442c133945f-kube-api-access-vc7bn\") pod \"metallb-operator-webhook-server-5786d94797-8n6wg\" (UID: \"dbb4b409-1678-4ae6-b584-9442c133945f\") " pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.792246 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" Oct 11 04:06:21 crc kubenswrapper[4798]: I1011 04:06:21.867315 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8"] Oct 11 04:06:22 crc kubenswrapper[4798]: I1011 04:06:22.245279 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg"] Oct 11 04:06:22 crc kubenswrapper[4798]: W1011 04:06:22.253100 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddbb4b409_1678_4ae6_b584_9442c133945f.slice/crio-a16da4473a70b89aa2f61f8708f9f4e11e5cfabe53d0957675a1369c0dc085ed WatchSource:0}: Error finding container a16da4473a70b89aa2f61f8708f9f4e11e5cfabe53d0957675a1369c0dc085ed: Status 404 returned error can't find the container with id a16da4473a70b89aa2f61f8708f9f4e11e5cfabe53d0957675a1369c0dc085ed Oct 11 04:06:22 crc kubenswrapper[4798]: I1011 04:06:22.678449 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" event={"ID":"dbb4b409-1678-4ae6-b584-9442c133945f","Type":"ContainerStarted","Data":"a16da4473a70b89aa2f61f8708f9f4e11e5cfabe53d0957675a1369c0dc085ed"} Oct 11 04:06:22 crc kubenswrapper[4798]: I1011 04:06:22.679320 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" event={"ID":"d1544005-518c-4296-b846-c5b9ac3af4c0","Type":"ContainerStarted","Data":"180b2d3156f2290d88c7b153f67afa855c0d138614f61dc1c2147af1b93b55eb"} Oct 11 04:06:25 crc kubenswrapper[4798]: I1011 04:06:25.702529 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" event={"ID":"d1544005-518c-4296-b846-c5b9ac3af4c0","Type":"ContainerStarted","Data":"4242268a0e65f63fe1ae85c3f40bb1439df07751d811634610898aaa345491db"} Oct 11 04:06:25 crc kubenswrapper[4798]: I1011 04:06:25.703265 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" Oct 11 04:06:25 crc kubenswrapper[4798]: I1011 04:06:25.730541 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" podStartSLOduration=1.7985479899999999 podStartE2EDuration="4.730520751s" podCreationTimestamp="2025-10-11 04:06:21 +0000 UTC" firstStartedPulling="2025-10-11 04:06:21.900641842 +0000 UTC m=+677.236931528" lastFinishedPulling="2025-10-11 04:06:24.832614603 +0000 UTC m=+680.168904289" observedRunningTime="2025-10-11 04:06:25.725971119 +0000 UTC m=+681.062260815" watchObservedRunningTime="2025-10-11 04:06:25.730520751 +0000 UTC m=+681.066810437" Oct 11 04:06:27 crc kubenswrapper[4798]: I1011 04:06:27.140353 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:06:27 crc kubenswrapper[4798]: I1011 04:06:27.140882 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:06:27 crc kubenswrapper[4798]: I1011 04:06:27.717174 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" event={"ID":"dbb4b409-1678-4ae6-b584-9442c133945f","Type":"ContainerStarted","Data":"55762be0f7d87c67b6d62abb05089c51c00d32a05ded7fc6d6818bc65bdfa35f"} Oct 11 04:06:27 crc kubenswrapper[4798]: I1011 04:06:27.717551 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" Oct 11 04:06:27 crc kubenswrapper[4798]: I1011 04:06:27.736241 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" podStartSLOduration=1.923250758 podStartE2EDuration="6.736220196s" podCreationTimestamp="2025-10-11 04:06:21 +0000 UTC" firstStartedPulling="2025-10-11 04:06:22.256937256 +0000 UTC m=+677.593226942" lastFinishedPulling="2025-10-11 04:06:27.069906694 +0000 UTC m=+682.406196380" observedRunningTime="2025-10-11 04:06:27.735103402 +0000 UTC m=+683.071393088" watchObservedRunningTime="2025-10-11 04:06:27.736220196 +0000 UTC m=+683.072509882" Oct 11 04:06:41 crc kubenswrapper[4798]: I1011 04:06:41.796471 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-5786d94797-8n6wg" Oct 11 04:06:57 crc kubenswrapper[4798]: I1011 04:06:57.138181 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:06:57 crc kubenswrapper[4798]: I1011 04:06:57.138906 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:07:01 crc kubenswrapper[4798]: I1011 04:07:01.394840 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-7458f849d5-nbss8" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.104047 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-7q5rr"] Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.107635 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.111075 4798 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-hqqv7" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.113784 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.117581 4798 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.145559 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj"] Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.146285 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.150017 4798 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.192235 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/41a4b406-9a78-4d67-828b-81a3ce16248f-reloader\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.192416 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/41a4b406-9a78-4d67-828b-81a3ce16248f-frr-conf\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.192498 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/41a4b406-9a78-4d67-828b-81a3ce16248f-metrics\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.192540 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/41a4b406-9a78-4d67-828b-81a3ce16248f-frr-sockets\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.192566 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppg67\" (UniqueName: \"kubernetes.io/projected/41a4b406-9a78-4d67-828b-81a3ce16248f-kube-api-access-ppg67\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " 
pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.192617 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/41a4b406-9a78-4d67-828b-81a3ce16248f-frr-startup\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.192681 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/41a4b406-9a78-4d67-828b-81a3ce16248f-metrics-certs\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.237498 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj"] Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.293569 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/41a4b406-9a78-4d67-828b-81a3ce16248f-metrics\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.293629 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/41a4b406-9a78-4d67-828b-81a3ce16248f-frr-sockets\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.293648 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppg67\" (UniqueName: \"kubernetes.io/projected/41a4b406-9a78-4d67-828b-81a3ce16248f-kube-api-access-ppg67\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.293673 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/41a4b406-9a78-4d67-828b-81a3ce16248f-frr-startup\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.293709 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zj662\" (UniqueName: \"kubernetes.io/projected/71cb0bdd-c787-4000-b0fd-f7a0e84fa145-kube-api-access-zj662\") pod \"frr-k8s-webhook-server-64bf5d555-2mrbj\" (UID: \"71cb0bdd-c787-4000-b0fd-f7a0e84fa145\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.293732 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/41a4b406-9a78-4d67-828b-81a3ce16248f-metrics-certs\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.293770 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/41a4b406-9a78-4d67-828b-81a3ce16248f-reloader\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " 
pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.293795 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/71cb0bdd-c787-4000-b0fd-f7a0e84fa145-cert\") pod \"frr-k8s-webhook-server-64bf5d555-2mrbj\" (UID: \"71cb0bdd-c787-4000-b0fd-f7a0e84fa145\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.293824 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/41a4b406-9a78-4d67-828b-81a3ce16248f-frr-conf\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.294131 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/41a4b406-9a78-4d67-828b-81a3ce16248f-metrics\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.294267 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/41a4b406-9a78-4d67-828b-81a3ce16248f-frr-sockets\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.295208 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/41a4b406-9a78-4d67-828b-81a3ce16248f-frr-startup\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: E1011 04:07:02.295290 4798 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Oct 11 04:07:02 crc kubenswrapper[4798]: E1011 04:07:02.295331 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/41a4b406-9a78-4d67-828b-81a3ce16248f-metrics-certs podName:41a4b406-9a78-4d67-828b-81a3ce16248f nodeName:}" failed. No retries permitted until 2025-10-11 04:07:02.795317765 +0000 UTC m=+718.131607451 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/41a4b406-9a78-4d67-828b-81a3ce16248f-metrics-certs") pod "frr-k8s-7q5rr" (UID: "41a4b406-9a78-4d67-828b-81a3ce16248f") : secret "frr-k8s-certs-secret" not found Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.297775 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/41a4b406-9a78-4d67-828b-81a3ce16248f-frr-conf\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.300655 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/41a4b406-9a78-4d67-828b-81a3ce16248f-reloader\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.325174 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppg67\" (UniqueName: \"kubernetes.io/projected/41a4b406-9a78-4d67-828b-81a3ce16248f-kube-api-access-ppg67\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.353015 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-68d546b9d8-qt94z"] Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.354543 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.356361 4798 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.356726 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-zdkcf"] Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.357833 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-zdkcf" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.359151 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.359897 4798 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.360069 4798 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.360972 4798 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-9qh42" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.395125 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zj662\" (UniqueName: \"kubernetes.io/projected/71cb0bdd-c787-4000-b0fd-f7a0e84fa145-kube-api-access-zj662\") pod \"frr-k8s-webhook-server-64bf5d555-2mrbj\" (UID: \"71cb0bdd-c787-4000-b0fd-f7a0e84fa145\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.395216 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/71cb0bdd-c787-4000-b0fd-f7a0e84fa145-cert\") pod \"frr-k8s-webhook-server-64bf5d555-2mrbj\" (UID: \"71cb0bdd-c787-4000-b0fd-f7a0e84fa145\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.402454 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/71cb0bdd-c787-4000-b0fd-f7a0e84fa145-cert\") pod \"frr-k8s-webhook-server-64bf5d555-2mrbj\" (UID: \"71cb0bdd-c787-4000-b0fd-f7a0e84fa145\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.413272 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-68d546b9d8-qt94z"] Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.422124 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zj662\" (UniqueName: \"kubernetes.io/projected/71cb0bdd-c787-4000-b0fd-f7a0e84fa145-kube-api-access-zj662\") pod \"frr-k8s-webhook-server-64bf5d555-2mrbj\" (UID: \"71cb0bdd-c787-4000-b0fd-f7a0e84fa145\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.460225 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.496455 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-memberlist\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.496537 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8a76e293-0882-4754-b825-688511b6c234-metallb-excludel2\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.496596 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e-metrics-certs\") pod \"controller-68d546b9d8-qt94z\" (UID: \"5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e\") " pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.496625 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6czdr\" (UniqueName: \"kubernetes.io/projected/5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e-kube-api-access-6czdr\") pod \"controller-68d546b9d8-qt94z\" (UID: \"5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e\") " pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.496662 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e-cert\") pod \"controller-68d546b9d8-qt94z\" (UID: \"5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e\") " pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.496880 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dchlt\" (UniqueName: \"kubernetes.io/projected/8a76e293-0882-4754-b825-688511b6c234-kube-api-access-dchlt\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.497006 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-metrics-certs\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.598730 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e-cert\") pod \"controller-68d546b9d8-qt94z\" (UID: \"5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e\") " pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.598786 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dchlt\" (UniqueName: \"kubernetes.io/projected/8a76e293-0882-4754-b825-688511b6c234-kube-api-access-dchlt\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " 
pod="metallb-system/speaker-zdkcf" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.598821 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-metrics-certs\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.598875 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-memberlist\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.598898 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8a76e293-0882-4754-b825-688511b6c234-metallb-excludel2\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.598923 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e-metrics-certs\") pod \"controller-68d546b9d8-qt94z\" (UID: \"5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e\") " pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.598942 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6czdr\" (UniqueName: \"kubernetes.io/projected/5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e-kube-api-access-6czdr\") pod \"controller-68d546b9d8-qt94z\" (UID: \"5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e\") " pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:02 crc kubenswrapper[4798]: E1011 04:07:02.600335 4798 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Oct 11 04:07:02 crc kubenswrapper[4798]: E1011 04:07:02.600521 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-metrics-certs podName:8a76e293-0882-4754-b825-688511b6c234 nodeName:}" failed. No retries permitted until 2025-10-11 04:07:03.100466758 +0000 UTC m=+718.436756454 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-metrics-certs") pod "speaker-zdkcf" (UID: "8a76e293-0882-4754-b825-688511b6c234") : secret "speaker-certs-secret" not found Oct 11 04:07:02 crc kubenswrapper[4798]: E1011 04:07:02.600616 4798 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Oct 11 04:07:02 crc kubenswrapper[4798]: E1011 04:07:02.600696 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-memberlist podName:8a76e293-0882-4754-b825-688511b6c234 nodeName:}" failed. No retries permitted until 2025-10-11 04:07:03.100680123 +0000 UTC m=+718.436969819 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-memberlist") pod "speaker-zdkcf" (UID: "8a76e293-0882-4754-b825-688511b6c234") : secret "metallb-memberlist" not found Oct 11 04:07:02 crc kubenswrapper[4798]: E1011 04:07:02.601241 4798 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Oct 11 04:07:02 crc kubenswrapper[4798]: E1011 04:07:02.601345 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e-metrics-certs podName:5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e nodeName:}" failed. No retries permitted until 2025-10-11 04:07:03.101321297 +0000 UTC m=+718.437611073 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e-metrics-certs") pod "controller-68d546b9d8-qt94z" (UID: "5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e") : secret "controller-certs-secret" not found Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.602564 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8a76e293-0882-4754-b825-688511b6c234-metallb-excludel2\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.605544 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e-cert\") pod \"controller-68d546b9d8-qt94z\" (UID: \"5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e\") " pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.626522 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dchlt\" (UniqueName: \"kubernetes.io/projected/8a76e293-0882-4754-b825-688511b6c234-kube-api-access-dchlt\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.626607 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6czdr\" (UniqueName: \"kubernetes.io/projected/5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e-kube-api-access-6czdr\") pod \"controller-68d546b9d8-qt94z\" (UID: \"5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e\") " pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.697715 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj"] Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.801191 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/41a4b406-9a78-4d67-828b-81a3ce16248f-metrics-certs\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.804362 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/41a4b406-9a78-4d67-828b-81a3ce16248f-metrics-certs\") pod \"frr-k8s-7q5rr\" (UID: \"41a4b406-9a78-4d67-828b-81a3ce16248f\") " pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:02 crc kubenswrapper[4798]: I1011 04:07:02.934093 4798 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj" event={"ID":"71cb0bdd-c787-4000-b0fd-f7a0e84fa145","Type":"ContainerStarted","Data":"74c0959f7ba1975e1648e6be745c18995c3f0a2cbeadf228aa51bb43c7cfc3df"} Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.026694 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.106289 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-memberlist\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.106367 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e-metrics-certs\") pod \"controller-68d546b9d8-qt94z\" (UID: \"5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e\") " pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.106453 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-metrics-certs\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:03 crc kubenswrapper[4798]: E1011 04:07:03.106487 4798 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Oct 11 04:07:03 crc kubenswrapper[4798]: E1011 04:07:03.106573 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-memberlist podName:8a76e293-0882-4754-b825-688511b6c234 nodeName:}" failed. No retries permitted until 2025-10-11 04:07:04.106545735 +0000 UTC m=+719.442835421 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-memberlist") pod "speaker-zdkcf" (UID: "8a76e293-0882-4754-b825-688511b6c234") : secret "metallb-memberlist" not found Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.111323 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-metrics-certs\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.116331 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e-metrics-certs\") pod \"controller-68d546b9d8-qt94z\" (UID: \"5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e\") " pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.272336 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.516532 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-68d546b9d8-qt94z"] Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.940369 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7q5rr" event={"ID":"41a4b406-9a78-4d67-828b-81a3ce16248f","Type":"ContainerStarted","Data":"c120c28b1d31455e4c7252696dbf2b3b43dfe513fb4beb5fe271107a370e7801"} Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.943900 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-qt94z" event={"ID":"5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e","Type":"ContainerStarted","Data":"2a88e325bb82c84dd74148ad517e2d4fc56759fd3fd792504ca5be309a32e6ce"} Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.943931 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-qt94z" event={"ID":"5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e","Type":"ContainerStarted","Data":"7e3ad6dcd133779913a42380d6aa24005f2e86a1c61be13f4bfcc747e9b1185b"} Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.943942 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-qt94z" event={"ID":"5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e","Type":"ContainerStarted","Data":"46e7103217692329e0b9280d032704cf16b01da8f2f7d489c65d3b08e24ba393"} Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.944092 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:03 crc kubenswrapper[4798]: I1011 04:07:03.964014 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-68d546b9d8-qt94z" podStartSLOduration=1.963988712 podStartE2EDuration="1.963988712s" podCreationTimestamp="2025-10-11 04:07:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:07:03.961452054 +0000 UTC m=+719.297741750" watchObservedRunningTime="2025-10-11 04:07:03.963988712 +0000 UTC m=+719.300278398" Oct 11 04:07:04 crc kubenswrapper[4798]: I1011 04:07:04.121213 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-memberlist\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:04 crc kubenswrapper[4798]: I1011 04:07:04.143246 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8a76e293-0882-4754-b825-688511b6c234-memberlist\") pod \"speaker-zdkcf\" (UID: \"8a76e293-0882-4754-b825-688511b6c234\") " pod="metallb-system/speaker-zdkcf" Oct 11 04:07:04 crc kubenswrapper[4798]: I1011 04:07:04.179541 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-zdkcf" Oct 11 04:07:04 crc kubenswrapper[4798]: W1011 04:07:04.219424 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a76e293_0882_4754_b825_688511b6c234.slice/crio-7c8e77146aa08465d950dea063920a31365d2b8a7db889c7e4ab19f46f65bf98 WatchSource:0}: Error finding container 7c8e77146aa08465d950dea063920a31365d2b8a7db889c7e4ab19f46f65bf98: Status 404 returned error can't find the container with id 7c8e77146aa08465d950dea063920a31365d2b8a7db889c7e4ab19f46f65bf98 Oct 11 04:07:04 crc kubenswrapper[4798]: I1011 04:07:04.952678 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zdkcf" event={"ID":"8a76e293-0882-4754-b825-688511b6c234","Type":"ContainerStarted","Data":"fe14c47df86688f2b5ff1c0cdddd1ffd174e5aa0c12ef760ef75d92aa91f0fc7"} Oct 11 04:07:04 crc kubenswrapper[4798]: I1011 04:07:04.952732 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zdkcf" event={"ID":"8a76e293-0882-4754-b825-688511b6c234","Type":"ContainerStarted","Data":"ea5945c469ace4fe30de2f72f5bac5b4d0b23611543a04e06520fa72186260c4"} Oct 11 04:07:04 crc kubenswrapper[4798]: I1011 04:07:04.952743 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zdkcf" event={"ID":"8a76e293-0882-4754-b825-688511b6c234","Type":"ContainerStarted","Data":"7c8e77146aa08465d950dea063920a31365d2b8a7db889c7e4ab19f46f65bf98"} Oct 11 04:07:04 crc kubenswrapper[4798]: I1011 04:07:04.952950 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-zdkcf" Oct 11 04:07:04 crc kubenswrapper[4798]: I1011 04:07:04.982916 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-zdkcf" podStartSLOduration=2.982893154 podStartE2EDuration="2.982893154s" podCreationTimestamp="2025-10-11 04:07:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:07:04.97883302 +0000 UTC m=+720.315122726" watchObservedRunningTime="2025-10-11 04:07:04.982893154 +0000 UTC m=+720.319182840" Oct 11 04:07:10 crc kubenswrapper[4798]: I1011 04:07:10.994278 4798 generic.go:334] "Generic (PLEG): container finished" podID="41a4b406-9a78-4d67-828b-81a3ce16248f" containerID="4fdf224900c6041934ab593e45db9d2e4ef45aedc5ab425cd60a7538bc05d53e" exitCode=0 Oct 11 04:07:10 crc kubenswrapper[4798]: I1011 04:07:10.994682 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7q5rr" event={"ID":"41a4b406-9a78-4d67-828b-81a3ce16248f","Type":"ContainerDied","Data":"4fdf224900c6041934ab593e45db9d2e4ef45aedc5ab425cd60a7538bc05d53e"} Oct 11 04:07:10 crc kubenswrapper[4798]: I1011 04:07:10.998877 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj" event={"ID":"71cb0bdd-c787-4000-b0fd-f7a0e84fa145","Type":"ContainerStarted","Data":"9ddc6b474273d757ca5de42a8d7423cd0579d9446ce8df0436278a98f0bf1c48"} Oct 11 04:07:10 crc kubenswrapper[4798]: I1011 04:07:10.999129 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj" Oct 11 04:07:11 crc kubenswrapper[4798]: I1011 04:07:11.053202 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj" podStartSLOduration=1.385281733 
podStartE2EDuration="9.05317912s" podCreationTimestamp="2025-10-11 04:07:02 +0000 UTC" firstStartedPulling="2025-10-11 04:07:02.703226163 +0000 UTC m=+718.039515849" lastFinishedPulling="2025-10-11 04:07:10.37112355 +0000 UTC m=+725.707413236" observedRunningTime="2025-10-11 04:07:11.052090504 +0000 UTC m=+726.388380210" watchObservedRunningTime="2025-10-11 04:07:11.05317912 +0000 UTC m=+726.389468806" Oct 11 04:07:12 crc kubenswrapper[4798]: I1011 04:07:12.008373 4798 generic.go:334] "Generic (PLEG): container finished" podID="41a4b406-9a78-4d67-828b-81a3ce16248f" containerID="ae76dec3b1b40b10c7678ce47f65631625f4d561929070718d398b65c73cbc52" exitCode=0 Oct 11 04:07:12 crc kubenswrapper[4798]: I1011 04:07:12.008461 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7q5rr" event={"ID":"41a4b406-9a78-4d67-828b-81a3ce16248f","Type":"ContainerDied","Data":"ae76dec3b1b40b10c7678ce47f65631625f4d561929070718d398b65c73cbc52"} Oct 11 04:07:13 crc kubenswrapper[4798]: I1011 04:07:13.015462 4798 generic.go:334] "Generic (PLEG): container finished" podID="41a4b406-9a78-4d67-828b-81a3ce16248f" containerID="813540fb6d98223efa6c190d41da2277c8951845a6ab7bc4b27627ddcc0fab24" exitCode=0 Oct 11 04:07:13 crc kubenswrapper[4798]: I1011 04:07:13.015543 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7q5rr" event={"ID":"41a4b406-9a78-4d67-828b-81a3ce16248f","Type":"ContainerDied","Data":"813540fb6d98223efa6c190d41da2277c8951845a6ab7bc4b27627ddcc0fab24"} Oct 11 04:07:13 crc kubenswrapper[4798]: I1011 04:07:13.276431 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-68d546b9d8-qt94z" Oct 11 04:07:14 crc kubenswrapper[4798]: I1011 04:07:14.028689 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7q5rr" event={"ID":"41a4b406-9a78-4d67-828b-81a3ce16248f","Type":"ContainerStarted","Data":"a2b84de6ac555082e51acdfba3ff828cc3e5799d8d785c73697a3aa137579172"} Oct 11 04:07:14 crc kubenswrapper[4798]: I1011 04:07:14.028995 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7q5rr" event={"ID":"41a4b406-9a78-4d67-828b-81a3ce16248f","Type":"ContainerStarted","Data":"5ab8fc5a979aa8b4147878b7f0780a113f7191cec5e51262e41fc89c5eb71a84"} Oct 11 04:07:14 crc kubenswrapper[4798]: I1011 04:07:14.029005 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7q5rr" event={"ID":"41a4b406-9a78-4d67-828b-81a3ce16248f","Type":"ContainerStarted","Data":"324bdcd09f5daec068d8501789f8c1fa96738d1e77ea1d0352aaf9a204c9c753"} Oct 11 04:07:14 crc kubenswrapper[4798]: I1011 04:07:14.029013 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7q5rr" event={"ID":"41a4b406-9a78-4d67-828b-81a3ce16248f","Type":"ContainerStarted","Data":"03ea36f7b3c8ab3cf2b667106b57f64b88af5e32f2dc02a33d960cadd1b1266a"} Oct 11 04:07:14 crc kubenswrapper[4798]: I1011 04:07:14.029021 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7q5rr" event={"ID":"41a4b406-9a78-4d67-828b-81a3ce16248f","Type":"ContainerStarted","Data":"9bd8b6ed0350d69736fcc69903edb6226b3a52558cfd67e921219c53e4e4e8e4"} Oct 11 04:07:14 crc kubenswrapper[4798]: I1011 04:07:14.184168 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-zdkcf" Oct 11 04:07:15 crc kubenswrapper[4798]: I1011 04:07:15.045045 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="metallb-system/frr-k8s-7q5rr" event={"ID":"41a4b406-9a78-4d67-828b-81a3ce16248f","Type":"ContainerStarted","Data":"488ea362aa1eaf343f30776428bb1b569f349983c29bc8b858915e798178c255"} Oct 11 04:07:15 crc kubenswrapper[4798]: I1011 04:07:15.045483 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:18 crc kubenswrapper[4798]: I1011 04:07:18.027588 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:18 crc kubenswrapper[4798]: I1011 04:07:18.087537 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:18 crc kubenswrapper[4798]: I1011 04:07:18.117957 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-7q5rr" podStartSLOduration=8.888624073999999 podStartE2EDuration="16.117934577s" podCreationTimestamp="2025-10-11 04:07:02 +0000 UTC" firstStartedPulling="2025-10-11 04:07:03.16037372 +0000 UTC m=+718.496663446" lastFinishedPulling="2025-10-11 04:07:10.389684263 +0000 UTC m=+725.725973949" observedRunningTime="2025-10-11 04:07:15.074349698 +0000 UTC m=+730.410639414" watchObservedRunningTime="2025-10-11 04:07:18.117934577 +0000 UTC m=+733.454224263" Oct 11 04:07:20 crc kubenswrapper[4798]: I1011 04:07:20.575796 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-jz6k7"] Oct 11 04:07:20 crc kubenswrapper[4798]: I1011 04:07:20.576830 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-jz6k7" Oct 11 04:07:20 crc kubenswrapper[4798]: I1011 04:07:20.578993 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Oct 11 04:07:20 crc kubenswrapper[4798]: I1011 04:07:20.579724 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Oct 11 04:07:20 crc kubenswrapper[4798]: I1011 04:07:20.587568 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-54vr9" Oct 11 04:07:20 crc kubenswrapper[4798]: I1011 04:07:20.588459 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-jz6k7"] Oct 11 04:07:20 crc kubenswrapper[4798]: I1011 04:07:20.661122 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85t6m\" (UniqueName: \"kubernetes.io/projected/51b01c20-81ca-44e0-9900-255a35e42b1f-kube-api-access-85t6m\") pod \"openstack-operator-index-jz6k7\" (UID: \"51b01c20-81ca-44e0-9900-255a35e42b1f\") " pod="openstack-operators/openstack-operator-index-jz6k7" Oct 11 04:07:20 crc kubenswrapper[4798]: I1011 04:07:20.762465 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85t6m\" (UniqueName: \"kubernetes.io/projected/51b01c20-81ca-44e0-9900-255a35e42b1f-kube-api-access-85t6m\") pod \"openstack-operator-index-jz6k7\" (UID: \"51b01c20-81ca-44e0-9900-255a35e42b1f\") " pod="openstack-operators/openstack-operator-index-jz6k7" Oct 11 04:07:20 crc kubenswrapper[4798]: I1011 04:07:20.788349 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85t6m\" (UniqueName: \"kubernetes.io/projected/51b01c20-81ca-44e0-9900-255a35e42b1f-kube-api-access-85t6m\") pod 
\"openstack-operator-index-jz6k7\" (UID: \"51b01c20-81ca-44e0-9900-255a35e42b1f\") " pod="openstack-operators/openstack-operator-index-jz6k7" Oct 11 04:07:20 crc kubenswrapper[4798]: I1011 04:07:20.897874 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-jz6k7" Oct 11 04:07:21 crc kubenswrapper[4798]: I1011 04:07:21.113098 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-jz6k7"] Oct 11 04:07:22 crc kubenswrapper[4798]: I1011 04:07:22.103158 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jz6k7" event={"ID":"51b01c20-81ca-44e0-9900-255a35e42b1f","Type":"ContainerStarted","Data":"c0d08656c9f06115a36d34f881400e5997f58901da88fc11ad6a334016f7d554"} Oct 11 04:07:22 crc kubenswrapper[4798]: I1011 04:07:22.464222 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-2mrbj" Oct 11 04:07:23 crc kubenswrapper[4798]: I1011 04:07:23.031376 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-7q5rr" Oct 11 04:07:24 crc kubenswrapper[4798]: I1011 04:07:24.117702 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jz6k7" event={"ID":"51b01c20-81ca-44e0-9900-255a35e42b1f","Type":"ContainerStarted","Data":"f5ad3625e4443255c60f2d507f899ded47160ca3b25e94dff5074827a32d18c4"} Oct 11 04:07:24 crc kubenswrapper[4798]: I1011 04:07:24.145151 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-jz6k7" podStartSLOduration=1.673354896 podStartE2EDuration="4.145127996s" podCreationTimestamp="2025-10-11 04:07:20 +0000 UTC" firstStartedPulling="2025-10-11 04:07:21.133901621 +0000 UTC m=+736.470191317" lastFinishedPulling="2025-10-11 04:07:23.605674731 +0000 UTC m=+738.941964417" observedRunningTime="2025-10-11 04:07:24.139297981 +0000 UTC m=+739.475587667" watchObservedRunningTime="2025-10-11 04:07:24.145127996 +0000 UTC m=+739.481417672" Oct 11 04:07:25 crc kubenswrapper[4798]: I1011 04:07:25.770946 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-jz6k7"] Oct 11 04:07:26 crc kubenswrapper[4798]: I1011 04:07:26.131923 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-jz6k7" podUID="51b01c20-81ca-44e0-9900-255a35e42b1f" containerName="registry-server" containerID="cri-o://f5ad3625e4443255c60f2d507f899ded47160ca3b25e94dff5074827a32d18c4" gracePeriod=2 Oct 11 04:07:26 crc kubenswrapper[4798]: I1011 04:07:26.376548 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-nkfzg"] Oct 11 04:07:26 crc kubenswrapper[4798]: I1011 04:07:26.377298 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-nkfzg" Oct 11 04:07:26 crc kubenswrapper[4798]: I1011 04:07:26.397948 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-nkfzg"] Oct 11 04:07:26 crc kubenswrapper[4798]: I1011 04:07:26.452433 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wbhc\" (UniqueName: \"kubernetes.io/projected/18c8d96d-3c6b-480f-bea7-1bfbc2d871e3-kube-api-access-8wbhc\") pod \"openstack-operator-index-nkfzg\" (UID: \"18c8d96d-3c6b-480f-bea7-1bfbc2d871e3\") " pod="openstack-operators/openstack-operator-index-nkfzg" Oct 11 04:07:26 crc kubenswrapper[4798]: I1011 04:07:26.527968 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-jz6k7" Oct 11 04:07:26 crc kubenswrapper[4798]: I1011 04:07:26.555798 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85t6m\" (UniqueName: \"kubernetes.io/projected/51b01c20-81ca-44e0-9900-255a35e42b1f-kube-api-access-85t6m\") pod \"51b01c20-81ca-44e0-9900-255a35e42b1f\" (UID: \"51b01c20-81ca-44e0-9900-255a35e42b1f\") " Oct 11 04:07:26 crc kubenswrapper[4798]: I1011 04:07:26.556675 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wbhc\" (UniqueName: \"kubernetes.io/projected/18c8d96d-3c6b-480f-bea7-1bfbc2d871e3-kube-api-access-8wbhc\") pod \"openstack-operator-index-nkfzg\" (UID: \"18c8d96d-3c6b-480f-bea7-1bfbc2d871e3\") " pod="openstack-operators/openstack-operator-index-nkfzg" Oct 11 04:07:26 crc kubenswrapper[4798]: I1011 04:07:26.562181 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51b01c20-81ca-44e0-9900-255a35e42b1f-kube-api-access-85t6m" (OuterVolumeSpecName: "kube-api-access-85t6m") pod "51b01c20-81ca-44e0-9900-255a35e42b1f" (UID: "51b01c20-81ca-44e0-9900-255a35e42b1f"). InnerVolumeSpecName "kube-api-access-85t6m". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:07:26 crc kubenswrapper[4798]: I1011 04:07:26.574441 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wbhc\" (UniqueName: \"kubernetes.io/projected/18c8d96d-3c6b-480f-bea7-1bfbc2d871e3-kube-api-access-8wbhc\") pod \"openstack-operator-index-nkfzg\" (UID: \"18c8d96d-3c6b-480f-bea7-1bfbc2d871e3\") " pod="openstack-operators/openstack-operator-index-nkfzg" Oct 11 04:07:26 crc kubenswrapper[4798]: I1011 04:07:26.658202 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85t6m\" (UniqueName: \"kubernetes.io/projected/51b01c20-81ca-44e0-9900-255a35e42b1f-kube-api-access-85t6m\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:26 crc kubenswrapper[4798]: I1011 04:07:26.701086 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-nkfzg" Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.138839 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.138903 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.138969 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.139645 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c3b0e40f46ff6d5e205dd95a80f3265b5b1e7509ae9d7aa46df649af185d464c"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.139704 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://c3b0e40f46ff6d5e205dd95a80f3265b5b1e7509ae9d7aa46df649af185d464c" gracePeriod=600 Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.143210 4798 generic.go:334] "Generic (PLEG): container finished" podID="51b01c20-81ca-44e0-9900-255a35e42b1f" containerID="f5ad3625e4443255c60f2d507f899ded47160ca3b25e94dff5074827a32d18c4" exitCode=0 Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.143244 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jz6k7" event={"ID":"51b01c20-81ca-44e0-9900-255a35e42b1f","Type":"ContainerDied","Data":"f5ad3625e4443255c60f2d507f899ded47160ca3b25e94dff5074827a32d18c4"} Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.143255 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-jz6k7" Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.143267 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jz6k7" event={"ID":"51b01c20-81ca-44e0-9900-255a35e42b1f","Type":"ContainerDied","Data":"c0d08656c9f06115a36d34f881400e5997f58901da88fc11ad6a334016f7d554"} Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.143286 4798 scope.go:117] "RemoveContainer" containerID="f5ad3625e4443255c60f2d507f899ded47160ca3b25e94dff5074827a32d18c4" Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.149426 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-nkfzg"] Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.176064 4798 scope.go:117] "RemoveContainer" containerID="f5ad3625e4443255c60f2d507f899ded47160ca3b25e94dff5074827a32d18c4" Oct 11 04:07:27 crc kubenswrapper[4798]: E1011 04:07:27.176460 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5ad3625e4443255c60f2d507f899ded47160ca3b25e94dff5074827a32d18c4\": container with ID starting with f5ad3625e4443255c60f2d507f899ded47160ca3b25e94dff5074827a32d18c4 not found: ID does not exist" containerID="f5ad3625e4443255c60f2d507f899ded47160ca3b25e94dff5074827a32d18c4" Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.176489 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5ad3625e4443255c60f2d507f899ded47160ca3b25e94dff5074827a32d18c4"} err="failed to get container status \"f5ad3625e4443255c60f2d507f899ded47160ca3b25e94dff5074827a32d18c4\": rpc error: code = NotFound desc = could not find container \"f5ad3625e4443255c60f2d507f899ded47160ca3b25e94dff5074827a32d18c4\": container with ID starting with f5ad3625e4443255c60f2d507f899ded47160ca3b25e94dff5074827a32d18c4 not found: ID does not exist" Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.178405 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-jz6k7"] Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.181448 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-jz6k7"] Oct 11 04:07:27 crc kubenswrapper[4798]: I1011 04:07:27.430168 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51b01c20-81ca-44e0-9900-255a35e42b1f" path="/var/lib/kubelet/pods/51b01c20-81ca-44e0-9900-255a35e42b1f/volumes" Oct 11 04:07:28 crc kubenswrapper[4798]: I1011 04:07:28.155564 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerID="c3b0e40f46ff6d5e205dd95a80f3265b5b1e7509ae9d7aa46df649af185d464c" exitCode=0 Oct 11 04:07:28 crc kubenswrapper[4798]: I1011 04:07:28.155691 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"c3b0e40f46ff6d5e205dd95a80f3265b5b1e7509ae9d7aa46df649af185d464c"} Oct 11 04:07:28 crc kubenswrapper[4798]: I1011 04:07:28.156365 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"54c6a0004bcf04af0eb4f7beb3728e8dd11d3c9b035976cf54104ce60f0d2629"} Oct 11 04:07:28 crc 
Oct 11 04:07:28 crc kubenswrapper[4798]: I1011 04:07:28.162484 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-nkfzg" event={"ID":"18c8d96d-3c6b-480f-bea7-1bfbc2d871e3","Type":"ContainerStarted","Data":"3ba4ebb6cbaa6bf680f2a1333da3bdc15a2e44c3062172698f477ad43e2a88d8"}
Oct 11 04:07:28 crc kubenswrapper[4798]: I1011 04:07:28.162541 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-nkfzg" event={"ID":"18c8d96d-3c6b-480f-bea7-1bfbc2d871e3","Type":"ContainerStarted","Data":"603e0b217bf095371841206b899296f0abd28270aaad6cc3fa31d7e04622fa2a"}
Oct 11 04:07:28 crc kubenswrapper[4798]: I1011 04:07:28.205943 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-nkfzg" podStartSLOduration=2.15889304 podStartE2EDuration="2.205917667s" podCreationTimestamp="2025-10-11 04:07:26 +0000 UTC" firstStartedPulling="2025-10-11 04:07:27.160883087 +0000 UTC m=+742.497172773" lastFinishedPulling="2025-10-11 04:07:27.207907714 +0000 UTC m=+742.544197400" observedRunningTime="2025-10-11 04:07:28.201799571 +0000 UTC m=+743.538089277" watchObservedRunningTime="2025-10-11 04:07:28.205917667 +0000 UTC m=+743.542207353"
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.445511 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5bvx8"]
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.447979 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" podUID="7b1d91a6-9b59-466c-a922-22825aac279b" containerName="controller-manager" containerID="cri-o://ce4e7cc4cf2a388205de17dcb03bc4b4436ac73c02c91b5520a5b53aeb79e280" gracePeriod=30
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.517898 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5"]
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.518148 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" podUID="6c3e154e-469a-47c1-a71a-b09597e9d303" containerName="route-controller-manager" containerID="cri-o://6f463880f08e73f586ff610569f6062c31341756428da3ce223ed9836f2d304a" gracePeriod=30
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.891075 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8"
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.966790 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-config\") pod \"7b1d91a6-9b59-466c-a922-22825aac279b\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") "
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.966924 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b1d91a6-9b59-466c-a922-22825aac279b-serving-cert\") pod \"7b1d91a6-9b59-466c-a922-22825aac279b\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") "
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.966971 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-client-ca\") pod \"7b1d91a6-9b59-466c-a922-22825aac279b\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") "
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.966991 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-proxy-ca-bundles\") pod \"7b1d91a6-9b59-466c-a922-22825aac279b\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") "
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.967021 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xp9b6\" (UniqueName: \"kubernetes.io/projected/7b1d91a6-9b59-466c-a922-22825aac279b-kube-api-access-xp9b6\") pod \"7b1d91a6-9b59-466c-a922-22825aac279b\" (UID: \"7b1d91a6-9b59-466c-a922-22825aac279b\") "
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.967926 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-client-ca" (OuterVolumeSpecName: "client-ca") pod "7b1d91a6-9b59-466c-a922-22825aac279b" (UID: "7b1d91a6-9b59-466c-a922-22825aac279b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.968128 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-config" (OuterVolumeSpecName: "config") pod "7b1d91a6-9b59-466c-a922-22825aac279b" (UID: "7b1d91a6-9b59-466c-a922-22825aac279b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.968253 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7b1d91a6-9b59-466c-a922-22825aac279b" (UID: "7b1d91a6-9b59-466c-a922-22825aac279b"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.972801 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b1d91a6-9b59-466c-a922-22825aac279b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7b1d91a6-9b59-466c-a922-22825aac279b" (UID: "7b1d91a6-9b59-466c-a922-22825aac279b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.973154 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b1d91a6-9b59-466c-a922-22825aac279b-kube-api-access-xp9b6" (OuterVolumeSpecName: "kube-api-access-xp9b6") pod "7b1d91a6-9b59-466c-a922-22825aac279b" (UID: "7b1d91a6-9b59-466c-a922-22825aac279b"). InnerVolumeSpecName "kube-api-access-xp9b6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:07:33 crc kubenswrapper[4798]: I1011 04:07:33.978978 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.068104 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c3e154e-469a-47c1-a71a-b09597e9d303-config\") pod \"6c3e154e-469a-47c1-a71a-b09597e9d303\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.068199 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c3e154e-469a-47c1-a71a-b09597e9d303-serving-cert\") pod \"6c3e154e-469a-47c1-a71a-b09597e9d303\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.068228 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6c3e154e-469a-47c1-a71a-b09597e9d303-client-ca\") pod \"6c3e154e-469a-47c1-a71a-b09597e9d303\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.068254 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vfz2\" (UniqueName: \"kubernetes.io/projected/6c3e154e-469a-47c1-a71a-b09597e9d303-kube-api-access-8vfz2\") pod \"6c3e154e-469a-47c1-a71a-b09597e9d303\" (UID: \"6c3e154e-469a-47c1-a71a-b09597e9d303\") " Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.068481 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b1d91a6-9b59-466c-a922-22825aac279b-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.068493 4798 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-client-ca\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.068502 4798 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.068513 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xp9b6\" (UniqueName: \"kubernetes.io/projected/7b1d91a6-9b59-466c-a922-22825aac279b-kube-api-access-xp9b6\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.068524 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1d91a6-9b59-466c-a922-22825aac279b-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.069385 4798 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c3e154e-469a-47c1-a71a-b09597e9d303-config" (OuterVolumeSpecName: "config") pod "6c3e154e-469a-47c1-a71a-b09597e9d303" (UID: "6c3e154e-469a-47c1-a71a-b09597e9d303"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.069578 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c3e154e-469a-47c1-a71a-b09597e9d303-client-ca" (OuterVolumeSpecName: "client-ca") pod "6c3e154e-469a-47c1-a71a-b09597e9d303" (UID: "6c3e154e-469a-47c1-a71a-b09597e9d303"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.072380 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3e154e-469a-47c1-a71a-b09597e9d303-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6c3e154e-469a-47c1-a71a-b09597e9d303" (UID: "6c3e154e-469a-47c1-a71a-b09597e9d303"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.072683 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c3e154e-469a-47c1-a71a-b09597e9d303-kube-api-access-8vfz2" (OuterVolumeSpecName: "kube-api-access-8vfz2") pod "6c3e154e-469a-47c1-a71a-b09597e9d303" (UID: "6c3e154e-469a-47c1-a71a-b09597e9d303"). InnerVolumeSpecName "kube-api-access-8vfz2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.169788 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c3e154e-469a-47c1-a71a-b09597e9d303-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.169856 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c3e154e-469a-47c1-a71a-b09597e9d303-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.169880 4798 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6c3e154e-469a-47c1-a71a-b09597e9d303-client-ca\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.169907 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vfz2\" (UniqueName: \"kubernetes.io/projected/6c3e154e-469a-47c1-a71a-b09597e9d303-kube-api-access-8vfz2\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.223030 4798 generic.go:334] "Generic (PLEG): container finished" podID="7b1d91a6-9b59-466c-a922-22825aac279b" containerID="ce4e7cc4cf2a388205de17dcb03bc4b4436ac73c02c91b5520a5b53aeb79e280" exitCode=0 Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.223124 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" event={"ID":"7b1d91a6-9b59-466c-a922-22825aac279b","Type":"ContainerDied","Data":"ce4e7cc4cf2a388205de17dcb03bc4b4436ac73c02c91b5520a5b53aeb79e280"} Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.223156 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" 
event={"ID":"7b1d91a6-9b59-466c-a922-22825aac279b","Type":"ContainerDied","Data":"fc2062458781ad220c015eee1187cd4892879287b02f7d33d96de621b30afb9c"} Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.223176 4798 scope.go:117] "RemoveContainer" containerID="ce4e7cc4cf2a388205de17dcb03bc4b4436ac73c02c91b5520a5b53aeb79e280" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.223169 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5bvx8" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.224953 4798 generic.go:334] "Generic (PLEG): container finished" podID="6c3e154e-469a-47c1-a71a-b09597e9d303" containerID="6f463880f08e73f586ff610569f6062c31341756428da3ce223ed9836f2d304a" exitCode=0 Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.225007 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" event={"ID":"6c3e154e-469a-47c1-a71a-b09597e9d303","Type":"ContainerDied","Data":"6f463880f08e73f586ff610569f6062c31341756428da3ce223ed9836f2d304a"} Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.225044 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" event={"ID":"6c3e154e-469a-47c1-a71a-b09597e9d303","Type":"ContainerDied","Data":"a1c368421eddc8056650b3ea0e64d005299e6107d93dd6b8208388afc635b757"} Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.225151 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.242744 4798 scope.go:117] "RemoveContainer" containerID="ce4e7cc4cf2a388205de17dcb03bc4b4436ac73c02c91b5520a5b53aeb79e280" Oct 11 04:07:34 crc kubenswrapper[4798]: E1011 04:07:34.245205 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce4e7cc4cf2a388205de17dcb03bc4b4436ac73c02c91b5520a5b53aeb79e280\": container with ID starting with ce4e7cc4cf2a388205de17dcb03bc4b4436ac73c02c91b5520a5b53aeb79e280 not found: ID does not exist" containerID="ce4e7cc4cf2a388205de17dcb03bc4b4436ac73c02c91b5520a5b53aeb79e280" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.245260 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce4e7cc4cf2a388205de17dcb03bc4b4436ac73c02c91b5520a5b53aeb79e280"} err="failed to get container status \"ce4e7cc4cf2a388205de17dcb03bc4b4436ac73c02c91b5520a5b53aeb79e280\": rpc error: code = NotFound desc = could not find container \"ce4e7cc4cf2a388205de17dcb03bc4b4436ac73c02c91b5520a5b53aeb79e280\": container with ID starting with ce4e7cc4cf2a388205de17dcb03bc4b4436ac73c02c91b5520a5b53aeb79e280 not found: ID does not exist" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.245302 4798 scope.go:117] "RemoveContainer" containerID="6f463880f08e73f586ff610569f6062c31341756428da3ce223ed9836f2d304a" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.268589 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5"] Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.277323 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-9c8v5"] Oct 11 
04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.286341 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5bvx8"] Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.290263 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5bvx8"] Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.290328 4798 scope.go:117] "RemoveContainer" containerID="6f463880f08e73f586ff610569f6062c31341756428da3ce223ed9836f2d304a" Oct 11 04:07:34 crc kubenswrapper[4798]: E1011 04:07:34.290881 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f463880f08e73f586ff610569f6062c31341756428da3ce223ed9836f2d304a\": container with ID starting with 6f463880f08e73f586ff610569f6062c31341756428da3ce223ed9836f2d304a not found: ID does not exist" containerID="6f463880f08e73f586ff610569f6062c31341756428da3ce223ed9836f2d304a" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.290915 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f463880f08e73f586ff610569f6062c31341756428da3ce223ed9836f2d304a"} err="failed to get container status \"6f463880f08e73f586ff610569f6062c31341756428da3ce223ed9836f2d304a\": rpc error: code = NotFound desc = could not find container \"6f463880f08e73f586ff610569f6062c31341756428da3ce223ed9836f2d304a\": container with ID starting with 6f463880f08e73f586ff610569f6062c31341756428da3ce223ed9836f2d304a not found: ID does not exist" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.746298 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv"] Oct 11 04:07:34 crc kubenswrapper[4798]: E1011 04:07:34.746862 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b1d91a6-9b59-466c-a922-22825aac279b" containerName="controller-manager" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.746875 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b1d91a6-9b59-466c-a922-22825aac279b" containerName="controller-manager" Oct 11 04:07:34 crc kubenswrapper[4798]: E1011 04:07:34.746896 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c3e154e-469a-47c1-a71a-b09597e9d303" containerName="route-controller-manager" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.746902 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c3e154e-469a-47c1-a71a-b09597e9d303" containerName="route-controller-manager" Oct 11 04:07:34 crc kubenswrapper[4798]: E1011 04:07:34.746909 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51b01c20-81ca-44e0-9900-255a35e42b1f" containerName="registry-server" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.746915 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="51b01c20-81ca-44e0-9900-255a35e42b1f" containerName="registry-server" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.747007 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="51b01c20-81ca-44e0-9900-255a35e42b1f" containerName="registry-server" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.747029 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b1d91a6-9b59-466c-a922-22825aac279b" containerName="controller-manager" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.747039 4798 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="6c3e154e-469a-47c1-a71a-b09597e9d303" containerName="route-controller-manager" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.747458 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.751938 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.752374 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.752623 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.752699 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.753020 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.753142 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.753454 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"] Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.754704 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.758454 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.758570 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.758597 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.759098 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.759126 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.759433 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.761074 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.768237 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv"] Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.771950 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"] Oct 11 
04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.777945 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62c0d6b-19d0-467d-a403-fec46d726560-config\") pod \"route-controller-manager-9ffdbc549-wsjnx\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.778040 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39993f52-84a6-46f3-a41c-69a0b491a4ea-serving-cert\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.778091 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b62c0d6b-19d0-467d-a403-fec46d726560-serving-cert\") pod \"route-controller-manager-9ffdbc549-wsjnx\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.778253 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b62c0d6b-19d0-467d-a403-fec46d726560-client-ca\") pod \"route-controller-manager-9ffdbc549-wsjnx\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.778306 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnc2s\" (UniqueName: \"kubernetes.io/projected/39993f52-84a6-46f3-a41c-69a0b491a4ea-kube-api-access-vnc2s\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.778353 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/39993f52-84a6-46f3-a41c-69a0b491a4ea-proxy-ca-bundles\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.778492 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39993f52-84a6-46f3-a41c-69a0b491a4ea-config\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.778554 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/39993f52-84a6-46f3-a41c-69a0b491a4ea-client-ca\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv" Oct 11 04:07:34 crc 
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.879663 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/39993f52-84a6-46f3-a41c-69a0b491a4ea-proxy-ca-bundles\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.879729 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39993f52-84a6-46f3-a41c-69a0b491a4ea-config\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.879753 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/39993f52-84a6-46f3-a41c-69a0b491a4ea-client-ca\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.879776 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qq76j\" (UniqueName: \"kubernetes.io/projected/b62c0d6b-19d0-467d-a403-fec46d726560-kube-api-access-qq76j\") pod \"route-controller-manager-9ffdbc549-wsjnx\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.879839 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62c0d6b-19d0-467d-a403-fec46d726560-config\") pod \"route-controller-manager-9ffdbc549-wsjnx\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.879866 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39993f52-84a6-46f3-a41c-69a0b491a4ea-serving-cert\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.879900 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b62c0d6b-19d0-467d-a403-fec46d726560-serving-cert\") pod \"route-controller-manager-9ffdbc549-wsjnx\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.879917 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b62c0d6b-19d0-467d-a403-fec46d726560-client-ca\") pod \"route-controller-manager-9ffdbc549-wsjnx\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.879945 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnc2s\" (UniqueName: \"kubernetes.io/projected/39993f52-84a6-46f3-a41c-69a0b491a4ea-kube-api-access-vnc2s\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.881259 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/39993f52-84a6-46f3-a41c-69a0b491a4ea-proxy-ca-bundles\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.881553 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b62c0d6b-19d0-467d-a403-fec46d726560-client-ca\") pod \"route-controller-manager-9ffdbc549-wsjnx\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.882067 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/39993f52-84a6-46f3-a41c-69a0b491a4ea-client-ca\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.882352 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62c0d6b-19d0-467d-a403-fec46d726560-config\") pod \"route-controller-manager-9ffdbc549-wsjnx\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.882592 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39993f52-84a6-46f3-a41c-69a0b491a4ea-config\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.884900 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39993f52-84a6-46f3-a41c-69a0b491a4ea-serving-cert\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv"
Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.887077 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b62c0d6b-19d0-467d-a403-fec46d726560-serving-cert\") pod \"route-controller-manager-9ffdbc549-wsjnx\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"
pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.900797 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnc2s\" (UniqueName: \"kubernetes.io/projected/39993f52-84a6-46f3-a41c-69a0b491a4ea-kube-api-access-vnc2s\") pod \"controller-manager-6d55bcf4fd-9h8gv\" (UID: \"39993f52-84a6-46f3-a41c-69a0b491a4ea\") " pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv" Oct 11 04:07:34 crc kubenswrapper[4798]: I1011 04:07:34.905835 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qq76j\" (UniqueName: \"kubernetes.io/projected/b62c0d6b-19d0-467d-a403-fec46d726560-kube-api-access-qq76j\") pod \"route-controller-manager-9ffdbc549-wsjnx\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" Oct 11 04:07:35 crc kubenswrapper[4798]: I1011 04:07:35.062428 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv" Oct 11 04:07:35 crc kubenswrapper[4798]: I1011 04:07:35.109311 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" Oct 11 04:07:35 crc kubenswrapper[4798]: I1011 04:07:35.347255 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"] Oct 11 04:07:35 crc kubenswrapper[4798]: I1011 04:07:35.410967 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"] Oct 11 04:07:35 crc kubenswrapper[4798]: I1011 04:07:35.432763 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c3e154e-469a-47c1-a71a-b09597e9d303" path="/var/lib/kubelet/pods/6c3e154e-469a-47c1-a71a-b09597e9d303/volumes" Oct 11 04:07:35 crc kubenswrapper[4798]: I1011 04:07:35.433817 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b1d91a6-9b59-466c-a922-22825aac279b" path="/var/lib/kubelet/pods/7b1d91a6-9b59-466c-a922-22825aac279b/volumes" Oct 11 04:07:35 crc kubenswrapper[4798]: I1011 04:07:35.504267 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv"] Oct 11 04:07:35 crc kubenswrapper[4798]: W1011 04:07:35.515095 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39993f52_84a6_46f3_a41c_69a0b491a4ea.slice/crio-bc262f05ebe52a521622ea8ca65278229d5532aa1b530d4c55a23c0e85d9ec0d WatchSource:0}: Error finding container bc262f05ebe52a521622ea8ca65278229d5532aa1b530d4c55a23c0e85d9ec0d: Status 404 returned error can't find the container with id bc262f05ebe52a521622ea8ca65278229d5532aa1b530d4c55a23c0e85d9ec0d Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.263291 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv" event={"ID":"39993f52-84a6-46f3-a41c-69a0b491a4ea","Type":"ContainerStarted","Data":"995e53b427b2c6d6dbc9bdd09876075ce58a110473b9c3535dfb0114dbec838a"} Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.263843 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.263868 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv" event={"ID":"39993f52-84a6-46f3-a41c-69a0b491a4ea","Type":"ContainerStarted","Data":"bc262f05ebe52a521622ea8ca65278229d5532aa1b530d4c55a23c0e85d9ec0d"} Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.265883 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" event={"ID":"b62c0d6b-19d0-467d-a403-fec46d726560","Type":"ContainerStarted","Data":"8b089aecec79eb53d29ad13890d97cc66e24d912c04b4e39495b32bff1b66fdb"} Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.265965 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" event={"ID":"b62c0d6b-19d0-467d-a403-fec46d726560","Type":"ContainerStarted","Data":"0881e727a9788eae8146766d4cb3db52476073784cd4aa945a3bd821873203c1"} Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.267546 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.267708 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" podUID="b62c0d6b-19d0-467d-a403-fec46d726560" containerName="route-controller-manager" containerID="cri-o://8b089aecec79eb53d29ad13890d97cc66e24d912c04b4e39495b32bff1b66fdb" gracePeriod=30 Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.270452 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.300728 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6d55bcf4fd-9h8gv" podStartSLOduration=3.300606264 podStartE2EDuration="3.300606264s" podCreationTimestamp="2025-10-11 04:07:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:07:36.290777145 +0000 UTC m=+751.627066831" watchObservedRunningTime="2025-10-11 04:07:36.300606264 +0000 UTC m=+751.636895950" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.312660 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" podStartSLOduration=3.312636984 podStartE2EDuration="3.312636984s" podCreationTimestamp="2025-10-11 04:07:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:07:36.310189527 +0000 UTC m=+751.646479223" watchObservedRunningTime="2025-10-11 04:07:36.312636984 +0000 UTC m=+751.648926680" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.318558 4798 patch_prober.go:28] interesting pod/route-controller-manager-9ffdbc549-wsjnx container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.54:8443/healthz\": read tcp 10.217.0.2:43814->10.217.0.54:8443: read: connection reset by peer" start-of-body= Oct 11 
Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.662130 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-9ffdbc549-wsjnx_b62c0d6b-19d0-467d-a403-fec46d726560/route-controller-manager/0.log"
Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.662484 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"
Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.695319 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz"]
Oct 11 04:07:36 crc kubenswrapper[4798]: E1011 04:07:36.695624 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b62c0d6b-19d0-467d-a403-fec46d726560" containerName="route-controller-manager"
Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.695643 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="b62c0d6b-19d0-467d-a403-fec46d726560" containerName="route-controller-manager"
Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.695752 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="b62c0d6b-19d0-467d-a403-fec46d726560" containerName="route-controller-manager"
Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.696155 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz"
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.701414 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-nkfzg" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.701477 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-nkfzg" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.719082 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz"] Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.726350 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qq76j\" (UniqueName: \"kubernetes.io/projected/b62c0d6b-19d0-467d-a403-fec46d726560-kube-api-access-qq76j\") pod \"b62c0d6b-19d0-467d-a403-fec46d726560\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.726502 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62c0d6b-19d0-467d-a403-fec46d726560-config\") pod \"b62c0d6b-19d0-467d-a403-fec46d726560\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.726548 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b62c0d6b-19d0-467d-a403-fec46d726560-client-ca\") pod \"b62c0d6b-19d0-467d-a403-fec46d726560\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.726620 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b62c0d6b-19d0-467d-a403-fec46d726560-serving-cert\") pod \"b62c0d6b-19d0-467d-a403-fec46d726560\" (UID: \"b62c0d6b-19d0-467d-a403-fec46d726560\") " Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.738288 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b62c0d6b-19d0-467d-a403-fec46d726560-config" (OuterVolumeSpecName: "config") pod "b62c0d6b-19d0-467d-a403-fec46d726560" (UID: "b62c0d6b-19d0-467d-a403-fec46d726560"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.739333 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b62c0d6b-19d0-467d-a403-fec46d726560-client-ca" (OuterVolumeSpecName: "client-ca") pod "b62c0d6b-19d0-467d-a403-fec46d726560" (UID: "b62c0d6b-19d0-467d-a403-fec46d726560"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.764679 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b62c0d6b-19d0-467d-a403-fec46d726560-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b62c0d6b-19d0-467d-a403-fec46d726560" (UID: "b62c0d6b-19d0-467d-a403-fec46d726560"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.764714 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b62c0d6b-19d0-467d-a403-fec46d726560-kube-api-access-qq76j" (OuterVolumeSpecName: "kube-api-access-qq76j") pod "b62c0d6b-19d0-467d-a403-fec46d726560" (UID: "b62c0d6b-19d0-467d-a403-fec46d726560"). InnerVolumeSpecName "kube-api-access-qq76j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.772679 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-nkfzg" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.828632 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e5a520ec-c5fe-42cb-adef-2f2ae183e984-serving-cert\") pod \"route-controller-manager-7b7c5c9946-ml5qz\" (UID: \"e5a520ec-c5fe-42cb-adef-2f2ae183e984\") " pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.828760 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e5a520ec-c5fe-42cb-adef-2f2ae183e984-client-ca\") pod \"route-controller-manager-7b7c5c9946-ml5qz\" (UID: \"e5a520ec-c5fe-42cb-adef-2f2ae183e984\") " pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.828786 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5a520ec-c5fe-42cb-adef-2f2ae183e984-config\") pod \"route-controller-manager-7b7c5c9946-ml5qz\" (UID: \"e5a520ec-c5fe-42cb-adef-2f2ae183e984\") " pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.828804 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nmdp\" (UniqueName: \"kubernetes.io/projected/e5a520ec-c5fe-42cb-adef-2f2ae183e984-kube-api-access-8nmdp\") pod \"route-controller-manager-7b7c5c9946-ml5qz\" (UID: \"e5a520ec-c5fe-42cb-adef-2f2ae183e984\") " pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.828879 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62c0d6b-19d0-467d-a403-fec46d726560-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.828891 4798 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b62c0d6b-19d0-467d-a403-fec46d726560-client-ca\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.828902 4798 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b62c0d6b-19d0-467d-a403-fec46d726560-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.828910 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qq76j\" (UniqueName: \"kubernetes.io/projected/b62c0d6b-19d0-467d-a403-fec46d726560-kube-api-access-qq76j\") on node 
\"crc\" DevicePath \"\"" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.930626 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5a520ec-c5fe-42cb-adef-2f2ae183e984-config\") pod \"route-controller-manager-7b7c5c9946-ml5qz\" (UID: \"e5a520ec-c5fe-42cb-adef-2f2ae183e984\") " pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.930667 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e5a520ec-c5fe-42cb-adef-2f2ae183e984-client-ca\") pod \"route-controller-manager-7b7c5c9946-ml5qz\" (UID: \"e5a520ec-c5fe-42cb-adef-2f2ae183e984\") " pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.930689 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nmdp\" (UniqueName: \"kubernetes.io/projected/e5a520ec-c5fe-42cb-adef-2f2ae183e984-kube-api-access-8nmdp\") pod \"route-controller-manager-7b7c5c9946-ml5qz\" (UID: \"e5a520ec-c5fe-42cb-adef-2f2ae183e984\") " pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.930724 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e5a520ec-c5fe-42cb-adef-2f2ae183e984-serving-cert\") pod \"route-controller-manager-7b7c5c9946-ml5qz\" (UID: \"e5a520ec-c5fe-42cb-adef-2f2ae183e984\") " pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.931994 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e5a520ec-c5fe-42cb-adef-2f2ae183e984-client-ca\") pod \"route-controller-manager-7b7c5c9946-ml5qz\" (UID: \"e5a520ec-c5fe-42cb-adef-2f2ae183e984\") " pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.932072 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5a520ec-c5fe-42cb-adef-2f2ae183e984-config\") pod \"route-controller-manager-7b7c5c9946-ml5qz\" (UID: \"e5a520ec-c5fe-42cb-adef-2f2ae183e984\") " pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.934809 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e5a520ec-c5fe-42cb-adef-2f2ae183e984-serving-cert\") pod \"route-controller-manager-7b7c5c9946-ml5qz\" (UID: \"e5a520ec-c5fe-42cb-adef-2f2ae183e984\") " pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:36 crc kubenswrapper[4798]: I1011 04:07:36.948630 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nmdp\" (UniqueName: \"kubernetes.io/projected/e5a520ec-c5fe-42cb-adef-2f2ae183e984-kube-api-access-8nmdp\") pod \"route-controller-manager-7b7c5c9946-ml5qz\" (UID: \"e5a520ec-c5fe-42cb-adef-2f2ae183e984\") " pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.039002 4798 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.280032 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-9ffdbc549-wsjnx_b62c0d6b-19d0-467d-a403-fec46d726560/route-controller-manager/0.log" Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.280104 4798 generic.go:334] "Generic (PLEG): container finished" podID="b62c0d6b-19d0-467d-a403-fec46d726560" containerID="8b089aecec79eb53d29ad13890d97cc66e24d912c04b4e39495b32bff1b66fdb" exitCode=255 Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.280197 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" event={"ID":"b62c0d6b-19d0-467d-a403-fec46d726560","Type":"ContainerDied","Data":"8b089aecec79eb53d29ad13890d97cc66e24d912c04b4e39495b32bff1b66fdb"} Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.280304 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" event={"ID":"b62c0d6b-19d0-467d-a403-fec46d726560","Type":"ContainerDied","Data":"0881e727a9788eae8146766d4cb3db52476073784cd4aa945a3bd821873203c1"} Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.280347 4798 scope.go:117] "RemoveContainer" containerID="8b089aecec79eb53d29ad13890d97cc66e24d912c04b4e39495b32bff1b66fdb" Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.280217 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx" Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.298618 4798 scope.go:117] "RemoveContainer" containerID="8b089aecec79eb53d29ad13890d97cc66e24d912c04b4e39495b32bff1b66fdb" Oct 11 04:07:37 crc kubenswrapper[4798]: E1011 04:07:37.299227 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b089aecec79eb53d29ad13890d97cc66e24d912c04b4e39495b32bff1b66fdb\": container with ID starting with 8b089aecec79eb53d29ad13890d97cc66e24d912c04b4e39495b32bff1b66fdb not found: ID does not exist" containerID="8b089aecec79eb53d29ad13890d97cc66e24d912c04b4e39495b32bff1b66fdb" Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.299289 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b089aecec79eb53d29ad13890d97cc66e24d912c04b4e39495b32bff1b66fdb"} err="failed to get container status \"8b089aecec79eb53d29ad13890d97cc66e24d912c04b4e39495b32bff1b66fdb\": rpc error: code = NotFound desc = could not find container \"8b089aecec79eb53d29ad13890d97cc66e24d912c04b4e39495b32bff1b66fdb\": container with ID starting with 8b089aecec79eb53d29ad13890d97cc66e24d912c04b4e39495b32bff1b66fdb not found: ID does not exist" Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.321852 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-nkfzg" Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.325432 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"] Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.331071 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-9ffdbc549-wsjnx"] Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.431128 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b62c0d6b-19d0-467d-a403-fec46d726560" path="/var/lib/kubelet/pods/b62c0d6b-19d0-467d-a403-fec46d726560/volumes" Oct 11 04:07:37 crc kubenswrapper[4798]: I1011 04:07:37.448633 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz"] Oct 11 04:07:37 crc kubenswrapper[4798]: W1011 04:07:37.458856 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode5a520ec_c5fe_42cb_adef_2f2ae183e984.slice/crio-1530c45899cce93d2c6408e0ef1090df9a1030e06df860809c39419a9a158a52 WatchSource:0}: Error finding container 1530c45899cce93d2c6408e0ef1090df9a1030e06df860809c39419a9a158a52: Status 404 returned error can't find the container with id 1530c45899cce93d2c6408e0ef1090df9a1030e06df860809c39419a9a158a52 Oct 11 04:07:38 crc kubenswrapper[4798]: I1011 04:07:38.290767 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" event={"ID":"e5a520ec-c5fe-42cb-adef-2f2ae183e984","Type":"ContainerStarted","Data":"bbcb8fa6c60cd74547dd22cc46d5966d614ea3b9e95ca7da57fb154685b3a7ba"} Oct 11 04:07:38 crc kubenswrapper[4798]: I1011 04:07:38.291138 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" event={"ID":"e5a520ec-c5fe-42cb-adef-2f2ae183e984","Type":"ContainerStarted","Data":"1530c45899cce93d2c6408e0ef1090df9a1030e06df860809c39419a9a158a52"} Oct 11 04:07:39 crc kubenswrapper[4798]: I1011 04:07:39.297058 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:39 crc kubenswrapper[4798]: I1011 04:07:39.302039 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" Oct 11 04:07:39 crc kubenswrapper[4798]: I1011 04:07:39.316882 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7b7c5c9946-ml5qz" podStartSLOduration=4.316843535 podStartE2EDuration="4.316843535s" podCreationTimestamp="2025-10-11 04:07:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:07:38.308291705 +0000 UTC m=+753.644581391" watchObservedRunningTime="2025-10-11 04:07:39.316843535 +0000 UTC m=+754.653133221" Oct 11 04:07:40 crc kubenswrapper[4798]: I1011 04:07:40.757779 4798 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Oct 11 04:07:50 crc kubenswrapper[4798]: I1011 04:07:50.831198 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77"] Oct 11 04:07:50 crc kubenswrapper[4798]: I1011 04:07:50.832973 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" Oct 11 04:07:50 crc kubenswrapper[4798]: I1011 04:07:50.835236 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-kz5t7" Oct 11 04:07:50 crc kubenswrapper[4798]: I1011 04:07:50.847601 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77"] Oct 11 04:07:50 crc kubenswrapper[4798]: I1011 04:07:50.953991 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6db7r\" (UniqueName: \"kubernetes.io/projected/8fb1be05-a9d3-479c-880e-9e7c73efac29-kube-api-access-6db7r\") pod \"712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77\" (UID: \"8fb1be05-a9d3-479c-880e-9e7c73efac29\") " pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" Oct 11 04:07:50 crc kubenswrapper[4798]: I1011 04:07:50.954070 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8fb1be05-a9d3-479c-880e-9e7c73efac29-bundle\") pod \"712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77\" (UID: \"8fb1be05-a9d3-479c-880e-9e7c73efac29\") " pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" Oct 11 04:07:50 crc kubenswrapper[4798]: I1011 04:07:50.954109 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8fb1be05-a9d3-479c-880e-9e7c73efac29-util\") pod \"712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77\" (UID: \"8fb1be05-a9d3-479c-880e-9e7c73efac29\") " pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" Oct 11 04:07:51 crc kubenswrapper[4798]: I1011 04:07:51.055269 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6db7r\" (UniqueName: \"kubernetes.io/projected/8fb1be05-a9d3-479c-880e-9e7c73efac29-kube-api-access-6db7r\") pod \"712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77\" (UID: \"8fb1be05-a9d3-479c-880e-9e7c73efac29\") " pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" Oct 11 04:07:51 crc kubenswrapper[4798]: I1011 04:07:51.055336 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8fb1be05-a9d3-479c-880e-9e7c73efac29-bundle\") pod \"712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77\" (UID: \"8fb1be05-a9d3-479c-880e-9e7c73efac29\") " pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" Oct 11 04:07:51 crc kubenswrapper[4798]: I1011 04:07:51.055361 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8fb1be05-a9d3-479c-880e-9e7c73efac29-util\") pod \"712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77\" (UID: \"8fb1be05-a9d3-479c-880e-9e7c73efac29\") " pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" Oct 11 04:07:51 crc kubenswrapper[4798]: I1011 04:07:51.055930 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/8fb1be05-a9d3-479c-880e-9e7c73efac29-util\") pod \"712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77\" (UID: \"8fb1be05-a9d3-479c-880e-9e7c73efac29\") " pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" Oct 11 04:07:51 crc kubenswrapper[4798]: I1011 04:07:51.056541 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8fb1be05-a9d3-479c-880e-9e7c73efac29-bundle\") pod \"712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77\" (UID: \"8fb1be05-a9d3-479c-880e-9e7c73efac29\") " pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" Oct 11 04:07:51 crc kubenswrapper[4798]: I1011 04:07:51.087168 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6db7r\" (UniqueName: \"kubernetes.io/projected/8fb1be05-a9d3-479c-880e-9e7c73efac29-kube-api-access-6db7r\") pod \"712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77\" (UID: \"8fb1be05-a9d3-479c-880e-9e7c73efac29\") " pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" Oct 11 04:07:51 crc kubenswrapper[4798]: I1011 04:07:51.152754 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" Oct 11 04:07:51 crc kubenswrapper[4798]: I1011 04:07:51.606341 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77"] Oct 11 04:07:51 crc kubenswrapper[4798]: W1011 04:07:51.614604 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8fb1be05_a9d3_479c_880e_9e7c73efac29.slice/crio-87c13c21946674728e73d77c9360c360950e9cb6056a7c36b3d2901b24b3080d WatchSource:0}: Error finding container 87c13c21946674728e73d77c9360c360950e9cb6056a7c36b3d2901b24b3080d: Status 404 returned error can't find the container with id 87c13c21946674728e73d77c9360c360950e9cb6056a7c36b3d2901b24b3080d Oct 11 04:07:52 crc kubenswrapper[4798]: I1011 04:07:52.412159 4798 generic.go:334] "Generic (PLEG): container finished" podID="8fb1be05-a9d3-479c-880e-9e7c73efac29" containerID="431fc180a2322dc4b27d158501b54b5a6bc2ae8662e4a9fe21de5e92e2280a43" exitCode=0 Oct 11 04:07:52 crc kubenswrapper[4798]: I1011 04:07:52.412254 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" event={"ID":"8fb1be05-a9d3-479c-880e-9e7c73efac29","Type":"ContainerDied","Data":"431fc180a2322dc4b27d158501b54b5a6bc2ae8662e4a9fe21de5e92e2280a43"} Oct 11 04:07:52 crc kubenswrapper[4798]: I1011 04:07:52.412692 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" event={"ID":"8fb1be05-a9d3-479c-880e-9e7c73efac29","Type":"ContainerStarted","Data":"87c13c21946674728e73d77c9360c360950e9cb6056a7c36b3d2901b24b3080d"} Oct 11 04:07:53 crc kubenswrapper[4798]: I1011 04:07:53.420931 4798 generic.go:334] "Generic (PLEG): container finished" podID="8fb1be05-a9d3-479c-880e-9e7c73efac29" containerID="62b010ca870f7b952fcd6fb3c4e219cb4fe72fbd8ede1cfcf9b71425414b538a" exitCode=0 Oct 11 04:07:53 crc kubenswrapper[4798]: I1011 04:07:53.421015 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" event={"ID":"8fb1be05-a9d3-479c-880e-9e7c73efac29","Type":"ContainerDied","Data":"62b010ca870f7b952fcd6fb3c4e219cb4fe72fbd8ede1cfcf9b71425414b538a"} Oct 11 04:07:54 crc kubenswrapper[4798]: I1011 04:07:54.434371 4798 generic.go:334] "Generic (PLEG): container finished" podID="8fb1be05-a9d3-479c-880e-9e7c73efac29" containerID="19083f43a08366f7acf472f6c9062d1d717ca1aa37fa17fd13af2e5abf565d9c" exitCode=0 Oct 11 04:07:54 crc kubenswrapper[4798]: I1011 04:07:54.434436 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" event={"ID":"8fb1be05-a9d3-479c-880e-9e7c73efac29","Type":"ContainerDied","Data":"19083f43a08366f7acf472f6c9062d1d717ca1aa37fa17fd13af2e5abf565d9c"} Oct 11 04:07:55 crc kubenswrapper[4798]: I1011 04:07:55.791510 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" Oct 11 04:07:55 crc kubenswrapper[4798]: I1011 04:07:55.931302 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8fb1be05-a9d3-479c-880e-9e7c73efac29-util\") pod \"8fb1be05-a9d3-479c-880e-9e7c73efac29\" (UID: \"8fb1be05-a9d3-479c-880e-9e7c73efac29\") " Oct 11 04:07:55 crc kubenswrapper[4798]: I1011 04:07:55.931956 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6db7r\" (UniqueName: \"kubernetes.io/projected/8fb1be05-a9d3-479c-880e-9e7c73efac29-kube-api-access-6db7r\") pod \"8fb1be05-a9d3-479c-880e-9e7c73efac29\" (UID: \"8fb1be05-a9d3-479c-880e-9e7c73efac29\") " Oct 11 04:07:55 crc kubenswrapper[4798]: I1011 04:07:55.932168 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8fb1be05-a9d3-479c-880e-9e7c73efac29-bundle\") pod \"8fb1be05-a9d3-479c-880e-9e7c73efac29\" (UID: \"8fb1be05-a9d3-479c-880e-9e7c73efac29\") " Oct 11 04:07:55 crc kubenswrapper[4798]: I1011 04:07:55.932843 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8fb1be05-a9d3-479c-880e-9e7c73efac29-bundle" (OuterVolumeSpecName: "bundle") pod "8fb1be05-a9d3-479c-880e-9e7c73efac29" (UID: "8fb1be05-a9d3-479c-880e-9e7c73efac29"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:07:55 crc kubenswrapper[4798]: I1011 04:07:55.940599 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fb1be05-a9d3-479c-880e-9e7c73efac29-kube-api-access-6db7r" (OuterVolumeSpecName: "kube-api-access-6db7r") pod "8fb1be05-a9d3-479c-880e-9e7c73efac29" (UID: "8fb1be05-a9d3-479c-880e-9e7c73efac29"). InnerVolumeSpecName "kube-api-access-6db7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:07:55 crc kubenswrapper[4798]: I1011 04:07:55.946021 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8fb1be05-a9d3-479c-880e-9e7c73efac29-util" (OuterVolumeSpecName: "util") pod "8fb1be05-a9d3-479c-880e-9e7c73efac29" (UID: "8fb1be05-a9d3-479c-880e-9e7c73efac29"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:07:56 crc kubenswrapper[4798]: I1011 04:07:56.034590 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6db7r\" (UniqueName: \"kubernetes.io/projected/8fb1be05-a9d3-479c-880e-9e7c73efac29-kube-api-access-6db7r\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:56 crc kubenswrapper[4798]: I1011 04:07:56.034637 4798 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8fb1be05-a9d3-479c-880e-9e7c73efac29-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:56 crc kubenswrapper[4798]: I1011 04:07:56.034658 4798 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8fb1be05-a9d3-479c-880e-9e7c73efac29-util\") on node \"crc\" DevicePath \"\"" Oct 11 04:07:56 crc kubenswrapper[4798]: I1011 04:07:56.447723 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" event={"ID":"8fb1be05-a9d3-479c-880e-9e7c73efac29","Type":"ContainerDied","Data":"87c13c21946674728e73d77c9360c360950e9cb6056a7c36b3d2901b24b3080d"} Oct 11 04:07:56 crc kubenswrapper[4798]: I1011 04:07:56.447784 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="87c13c21946674728e73d77c9360c360950e9cb6056a7c36b3d2901b24b3080d" Oct 11 04:07:56 crc kubenswrapper[4798]: I1011 04:07:56.448198 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77" Oct 11 04:07:59 crc kubenswrapper[4798]: I1011 04:07:59.722746 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4"] Oct 11 04:07:59 crc kubenswrapper[4798]: E1011 04:07:59.723250 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fb1be05-a9d3-479c-880e-9e7c73efac29" containerName="extract" Oct 11 04:07:59 crc kubenswrapper[4798]: I1011 04:07:59.723263 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fb1be05-a9d3-479c-880e-9e7c73efac29" containerName="extract" Oct 11 04:07:59 crc kubenswrapper[4798]: E1011 04:07:59.723271 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fb1be05-a9d3-479c-880e-9e7c73efac29" containerName="util" Oct 11 04:07:59 crc kubenswrapper[4798]: I1011 04:07:59.723277 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fb1be05-a9d3-479c-880e-9e7c73efac29" containerName="util" Oct 11 04:07:59 crc kubenswrapper[4798]: E1011 04:07:59.723298 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fb1be05-a9d3-479c-880e-9e7c73efac29" containerName="pull" Oct 11 04:07:59 crc kubenswrapper[4798]: I1011 04:07:59.723305 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fb1be05-a9d3-479c-880e-9e7c73efac29" containerName="pull" Oct 11 04:07:59 crc kubenswrapper[4798]: I1011 04:07:59.723421 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fb1be05-a9d3-479c-880e-9e7c73efac29" containerName="extract" Oct 11 04:07:59 crc kubenswrapper[4798]: I1011 04:07:59.724041 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4" Oct 11 04:07:59 crc kubenswrapper[4798]: I1011 04:07:59.731248 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-pmxqf" Oct 11 04:07:59 crc kubenswrapper[4798]: I1011 04:07:59.753353 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4"] Oct 11 04:07:59 crc kubenswrapper[4798]: I1011 04:07:59.794450 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctfq8\" (UniqueName: \"kubernetes.io/projected/6e988a50-121d-47e0-ad52-8a1139fc8ad6-kube-api-access-ctfq8\") pod \"openstack-operator-controller-operator-69766b5fb5-qk5q4\" (UID: \"6e988a50-121d-47e0-ad52-8a1139fc8ad6\") " pod="openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4" Oct 11 04:07:59 crc kubenswrapper[4798]: I1011 04:07:59.895913 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctfq8\" (UniqueName: \"kubernetes.io/projected/6e988a50-121d-47e0-ad52-8a1139fc8ad6-kube-api-access-ctfq8\") pod \"openstack-operator-controller-operator-69766b5fb5-qk5q4\" (UID: \"6e988a50-121d-47e0-ad52-8a1139fc8ad6\") " pod="openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4" Oct 11 04:07:59 crc kubenswrapper[4798]: I1011 04:07:59.916543 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctfq8\" (UniqueName: \"kubernetes.io/projected/6e988a50-121d-47e0-ad52-8a1139fc8ad6-kube-api-access-ctfq8\") pod \"openstack-operator-controller-operator-69766b5fb5-qk5q4\" (UID: \"6e988a50-121d-47e0-ad52-8a1139fc8ad6\") " pod="openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4" Oct 11 04:08:00 crc kubenswrapper[4798]: I1011 04:08:00.042097 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4" Oct 11 04:08:00 crc kubenswrapper[4798]: I1011 04:08:00.488418 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4"] Oct 11 04:08:01 crc kubenswrapper[4798]: I1011 04:08:01.508430 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4" event={"ID":"6e988a50-121d-47e0-ad52-8a1139fc8ad6","Type":"ContainerStarted","Data":"4a3ce4f138bf52ae5575166813c3899f4fa627347db822a971c146a34ec1c1ba"} Oct 11 04:08:04 crc kubenswrapper[4798]: I1011 04:08:04.539478 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4" event={"ID":"6e988a50-121d-47e0-ad52-8a1139fc8ad6","Type":"ContainerStarted","Data":"04e0cac562ca7f2e157ba0c415d1883097d45b33512acb0e84b5ff6dfb6a8314"} Oct 11 04:08:07 crc kubenswrapper[4798]: I1011 04:08:07.558148 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4" event={"ID":"6e988a50-121d-47e0-ad52-8a1139fc8ad6","Type":"ContainerStarted","Data":"20c30c298a76c3b39cf85526948b44ac14071dd33a8bca457137bb5159a8366d"} Oct 11 04:08:07 crc kubenswrapper[4798]: I1011 04:08:07.558766 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4" Oct 11 04:08:07 crc kubenswrapper[4798]: I1011 04:08:07.598104 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4" podStartSLOduration=2.606716224 podStartE2EDuration="8.598082139s" podCreationTimestamp="2025-10-11 04:07:59 +0000 UTC" firstStartedPulling="2025-10-11 04:08:00.50062929 +0000 UTC m=+775.836918976" lastFinishedPulling="2025-10-11 04:08:06.491995175 +0000 UTC m=+781.828284891" observedRunningTime="2025-10-11 04:08:07.592259914 +0000 UTC m=+782.928549610" watchObservedRunningTime="2025-10-11 04:08:07.598082139 +0000 UTC m=+782.934371845" Oct 11 04:08:08 crc kubenswrapper[4798]: I1011 04:08:08.569446 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-69766b5fb5-qk5q4" Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.095001 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7w2w4"] Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.096408 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.111899 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7w2w4"] Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.145785 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6abc8fc3-977c-42dd-9385-f86d5ff3878a-utilities\") pod \"certified-operators-7w2w4\" (UID: \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\") " pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.145870 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kl7tm\" (UniqueName: \"kubernetes.io/projected/6abc8fc3-977c-42dd-9385-f86d5ff3878a-kube-api-access-kl7tm\") pod \"certified-operators-7w2w4\" (UID: \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\") " pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.145916 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6abc8fc3-977c-42dd-9385-f86d5ff3878a-catalog-content\") pod \"certified-operators-7w2w4\" (UID: \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\") " pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.247610 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kl7tm\" (UniqueName: \"kubernetes.io/projected/6abc8fc3-977c-42dd-9385-f86d5ff3878a-kube-api-access-kl7tm\") pod \"certified-operators-7w2w4\" (UID: \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\") " pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.247693 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6abc8fc3-977c-42dd-9385-f86d5ff3878a-catalog-content\") pod \"certified-operators-7w2w4\" (UID: \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\") " pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.247769 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6abc8fc3-977c-42dd-9385-f86d5ff3878a-utilities\") pod \"certified-operators-7w2w4\" (UID: \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\") " pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.248275 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6abc8fc3-977c-42dd-9385-f86d5ff3878a-catalog-content\") pod \"certified-operators-7w2w4\" (UID: \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\") " pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.248286 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6abc8fc3-977c-42dd-9385-f86d5ff3878a-utilities\") pod \"certified-operators-7w2w4\" (UID: \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\") " pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.267270 4798 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kl7tm\" (UniqueName: \"kubernetes.io/projected/6abc8fc3-977c-42dd-9385-f86d5ff3878a-kube-api-access-kl7tm\") pod \"certified-operators-7w2w4\" (UID: \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\") " pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.414275 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:10 crc kubenswrapper[4798]: I1011 04:08:10.882304 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7w2w4"] Oct 11 04:08:10 crc kubenswrapper[4798]: W1011 04:08:10.886880 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6abc8fc3_977c_42dd_9385_f86d5ff3878a.slice/crio-010c41b5f4fdc5244a85321648bf07e0048a36ddf952912400d6e6d97c0cf9be WatchSource:0}: Error finding container 010c41b5f4fdc5244a85321648bf07e0048a36ddf952912400d6e6d97c0cf9be: Status 404 returned error can't find the container with id 010c41b5f4fdc5244a85321648bf07e0048a36ddf952912400d6e6d97c0cf9be Oct 11 04:08:11 crc kubenswrapper[4798]: I1011 04:08:11.587123 4798 generic.go:334] "Generic (PLEG): container finished" podID="6abc8fc3-977c-42dd-9385-f86d5ff3878a" containerID="8d3a8319f32aaff19534f7466610f33bd3cf31be3908af2b72126a67caf4ea6e" exitCode=0 Oct 11 04:08:11 crc kubenswrapper[4798]: I1011 04:08:11.587188 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7w2w4" event={"ID":"6abc8fc3-977c-42dd-9385-f86d5ff3878a","Type":"ContainerDied","Data":"8d3a8319f32aaff19534f7466610f33bd3cf31be3908af2b72126a67caf4ea6e"} Oct 11 04:08:11 crc kubenswrapper[4798]: I1011 04:08:11.587227 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7w2w4" event={"ID":"6abc8fc3-977c-42dd-9385-f86d5ff3878a","Type":"ContainerStarted","Data":"010c41b5f4fdc5244a85321648bf07e0048a36ddf952912400d6e6d97c0cf9be"} Oct 11 04:08:12 crc kubenswrapper[4798]: I1011 04:08:12.594719 4798 generic.go:334] "Generic (PLEG): container finished" podID="6abc8fc3-977c-42dd-9385-f86d5ff3878a" containerID="4c51a7a81fc7dcb6c294d893a12cdb10309b3d7ccc0a50866927ed7f9cb9bcda" exitCode=0 Oct 11 04:08:12 crc kubenswrapper[4798]: I1011 04:08:12.594810 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7w2w4" event={"ID":"6abc8fc3-977c-42dd-9385-f86d5ff3878a","Type":"ContainerDied","Data":"4c51a7a81fc7dcb6c294d893a12cdb10309b3d7ccc0a50866927ed7f9cb9bcda"} Oct 11 04:08:13 crc kubenswrapper[4798]: I1011 04:08:13.603767 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7w2w4" event={"ID":"6abc8fc3-977c-42dd-9385-f86d5ff3878a","Type":"ContainerStarted","Data":"edf697983540be9f8ac1a7f34e3f17faea99ef822b54cc296606c1a97941ecd4"} Oct 11 04:08:13 crc kubenswrapper[4798]: I1011 04:08:13.645205 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7w2w4" podStartSLOduration=1.980439637 podStartE2EDuration="3.645184843s" podCreationTimestamp="2025-10-11 04:08:10 +0000 UTC" firstStartedPulling="2025-10-11 04:08:11.588874908 +0000 UTC m=+786.925164614" lastFinishedPulling="2025-10-11 04:08:13.253620134 +0000 UTC m=+788.589909820" observedRunningTime="2025-10-11 04:08:13.640463002 +0000 UTC 
m=+788.976752688" watchObservedRunningTime="2025-10-11 04:08:13.645184843 +0000 UTC m=+788.981474539" Oct 11 04:08:20 crc kubenswrapper[4798]: I1011 04:08:20.415036 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:20 crc kubenswrapper[4798]: I1011 04:08:20.415707 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:20 crc kubenswrapper[4798]: I1011 04:08:20.453909 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:20 crc kubenswrapper[4798]: I1011 04:08:20.682561 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:20 crc kubenswrapper[4798]: I1011 04:08:20.727097 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7w2w4"] Oct 11 04:08:22 crc kubenswrapper[4798]: I1011 04:08:22.655738 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7w2w4" podUID="6abc8fc3-977c-42dd-9385-f86d5ff3878a" containerName="registry-server" containerID="cri-o://edf697983540be9f8ac1a7f34e3f17faea99ef822b54cc296606c1a97941ecd4" gracePeriod=2 Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.075081 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.090975 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-w7bgq"] Oct 11 04:08:23 crc kubenswrapper[4798]: E1011 04:08:23.091301 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6abc8fc3-977c-42dd-9385-f86d5ff3878a" containerName="registry-server" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.091326 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="6abc8fc3-977c-42dd-9385-f86d5ff3878a" containerName="registry-server" Oct 11 04:08:23 crc kubenswrapper[4798]: E1011 04:08:23.091344 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6abc8fc3-977c-42dd-9385-f86d5ff3878a" containerName="extract-utilities" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.091353 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="6abc8fc3-977c-42dd-9385-f86d5ff3878a" containerName="extract-utilities" Oct 11 04:08:23 crc kubenswrapper[4798]: E1011 04:08:23.091373 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6abc8fc3-977c-42dd-9385-f86d5ff3878a" containerName="extract-content" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.091382 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="6abc8fc3-977c-42dd-9385-f86d5ff3878a" containerName="extract-content" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.091564 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="6abc8fc3-977c-42dd-9385-f86d5ff3878a" containerName="registry-server" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.092717 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-w7bgq" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.112324 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-w7bgq"] Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.160449 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6abc8fc3-977c-42dd-9385-f86d5ff3878a-catalog-content\") pod \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\" (UID: \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\") " Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.160611 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kl7tm\" (UniqueName: \"kubernetes.io/projected/6abc8fc3-977c-42dd-9385-f86d5ff3878a-kube-api-access-kl7tm\") pod \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\" (UID: \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\") " Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.160641 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6abc8fc3-977c-42dd-9385-f86d5ff3878a-utilities\") pod \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\" (UID: \"6abc8fc3-977c-42dd-9385-f86d5ff3878a\") " Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.160737 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/525c5514-1646-41bf-bda9-04cdd47ff203-utilities\") pod \"community-operators-w7bgq\" (UID: \"525c5514-1646-41bf-bda9-04cdd47ff203\") " pod="openshift-marketplace/community-operators-w7bgq" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.160798 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/525c5514-1646-41bf-bda9-04cdd47ff203-catalog-content\") pod \"community-operators-w7bgq\" (UID: \"525c5514-1646-41bf-bda9-04cdd47ff203\") " pod="openshift-marketplace/community-operators-w7bgq" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.160830 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmpgh\" (UniqueName: \"kubernetes.io/projected/525c5514-1646-41bf-bda9-04cdd47ff203-kube-api-access-xmpgh\") pod \"community-operators-w7bgq\" (UID: \"525c5514-1646-41bf-bda9-04cdd47ff203\") " pod="openshift-marketplace/community-operators-w7bgq" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.162328 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6abc8fc3-977c-42dd-9385-f86d5ff3878a-utilities" (OuterVolumeSpecName: "utilities") pod "6abc8fc3-977c-42dd-9385-f86d5ff3878a" (UID: "6abc8fc3-977c-42dd-9385-f86d5ff3878a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.168954 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6abc8fc3-977c-42dd-9385-f86d5ff3878a-kube-api-access-kl7tm" (OuterVolumeSpecName: "kube-api-access-kl7tm") pod "6abc8fc3-977c-42dd-9385-f86d5ff3878a" (UID: "6abc8fc3-977c-42dd-9385-f86d5ff3878a"). InnerVolumeSpecName "kube-api-access-kl7tm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.262100 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/525c5514-1646-41bf-bda9-04cdd47ff203-catalog-content\") pod \"community-operators-w7bgq\" (UID: \"525c5514-1646-41bf-bda9-04cdd47ff203\") " pod="openshift-marketplace/community-operators-w7bgq" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.262168 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmpgh\" (UniqueName: \"kubernetes.io/projected/525c5514-1646-41bf-bda9-04cdd47ff203-kube-api-access-xmpgh\") pod \"community-operators-w7bgq\" (UID: \"525c5514-1646-41bf-bda9-04cdd47ff203\") " pod="openshift-marketplace/community-operators-w7bgq" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.262317 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/525c5514-1646-41bf-bda9-04cdd47ff203-utilities\") pod \"community-operators-w7bgq\" (UID: \"525c5514-1646-41bf-bda9-04cdd47ff203\") " pod="openshift-marketplace/community-operators-w7bgq" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.262888 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/525c5514-1646-41bf-bda9-04cdd47ff203-utilities\") pod \"community-operators-w7bgq\" (UID: \"525c5514-1646-41bf-bda9-04cdd47ff203\") " pod="openshift-marketplace/community-operators-w7bgq" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.262992 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kl7tm\" (UniqueName: \"kubernetes.io/projected/6abc8fc3-977c-42dd-9385-f86d5ff3878a-kube-api-access-kl7tm\") on node \"crc\" DevicePath \"\"" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.263010 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6abc8fc3-977c-42dd-9385-f86d5ff3878a-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.263045 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/525c5514-1646-41bf-bda9-04cdd47ff203-catalog-content\") pod \"community-operators-w7bgq\" (UID: \"525c5514-1646-41bf-bda9-04cdd47ff203\") " pod="openshift-marketplace/community-operators-w7bgq" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.280806 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmpgh\" (UniqueName: \"kubernetes.io/projected/525c5514-1646-41bf-bda9-04cdd47ff203-kube-api-access-xmpgh\") pod \"community-operators-w7bgq\" (UID: \"525c5514-1646-41bf-bda9-04cdd47ff203\") " pod="openshift-marketplace/community-operators-w7bgq" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.435573 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w7bgq" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.671049 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7w2w4" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.671088 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7w2w4" event={"ID":"6abc8fc3-977c-42dd-9385-f86d5ff3878a","Type":"ContainerDied","Data":"edf697983540be9f8ac1a7f34e3f17faea99ef822b54cc296606c1a97941ecd4"} Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.671751 4798 scope.go:117] "RemoveContainer" containerID="edf697983540be9f8ac1a7f34e3f17faea99ef822b54cc296606c1a97941ecd4" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.671571 4798 generic.go:334] "Generic (PLEG): container finished" podID="6abc8fc3-977c-42dd-9385-f86d5ff3878a" containerID="edf697983540be9f8ac1a7f34e3f17faea99ef822b54cc296606c1a97941ecd4" exitCode=0 Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.671913 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7w2w4" event={"ID":"6abc8fc3-977c-42dd-9385-f86d5ff3878a","Type":"ContainerDied","Data":"010c41b5f4fdc5244a85321648bf07e0048a36ddf952912400d6e6d97c0cf9be"} Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.709863 4798 scope.go:117] "RemoveContainer" containerID="4c51a7a81fc7dcb6c294d893a12cdb10309b3d7ccc0a50866927ed7f9cb9bcda" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.743994 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-w7bgq"] Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.746442 4798 scope.go:117] "RemoveContainer" containerID="8d3a8319f32aaff19534f7466610f33bd3cf31be3908af2b72126a67caf4ea6e" Oct 11 04:08:23 crc kubenswrapper[4798]: W1011 04:08:23.756075 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod525c5514_1646_41bf_bda9_04cdd47ff203.slice/crio-58f76a7d1e211328d1d535b8b23e1e6d35f1c2aed4911489c05a036aabac57b7 WatchSource:0}: Error finding container 58f76a7d1e211328d1d535b8b23e1e6d35f1c2aed4911489c05a036aabac57b7: Status 404 returned error can't find the container with id 58f76a7d1e211328d1d535b8b23e1e6d35f1c2aed4911489c05a036aabac57b7 Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.764741 4798 scope.go:117] "RemoveContainer" containerID="edf697983540be9f8ac1a7f34e3f17faea99ef822b54cc296606c1a97941ecd4" Oct 11 04:08:23 crc kubenswrapper[4798]: E1011 04:08:23.765184 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edf697983540be9f8ac1a7f34e3f17faea99ef822b54cc296606c1a97941ecd4\": container with ID starting with edf697983540be9f8ac1a7f34e3f17faea99ef822b54cc296606c1a97941ecd4 not found: ID does not exist" containerID="edf697983540be9f8ac1a7f34e3f17faea99ef822b54cc296606c1a97941ecd4" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.765225 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edf697983540be9f8ac1a7f34e3f17faea99ef822b54cc296606c1a97941ecd4"} err="failed to get container status \"edf697983540be9f8ac1a7f34e3f17faea99ef822b54cc296606c1a97941ecd4\": rpc error: code = NotFound desc = could not find container \"edf697983540be9f8ac1a7f34e3f17faea99ef822b54cc296606c1a97941ecd4\": container with ID starting with edf697983540be9f8ac1a7f34e3f17faea99ef822b54cc296606c1a97941ecd4 not found: ID does not exist" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.765253 4798 scope.go:117] 
"RemoveContainer" containerID="4c51a7a81fc7dcb6c294d893a12cdb10309b3d7ccc0a50866927ed7f9cb9bcda" Oct 11 04:08:23 crc kubenswrapper[4798]: E1011 04:08:23.766447 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c51a7a81fc7dcb6c294d893a12cdb10309b3d7ccc0a50866927ed7f9cb9bcda\": container with ID starting with 4c51a7a81fc7dcb6c294d893a12cdb10309b3d7ccc0a50866927ed7f9cb9bcda not found: ID does not exist" containerID="4c51a7a81fc7dcb6c294d893a12cdb10309b3d7ccc0a50866927ed7f9cb9bcda" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.766485 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c51a7a81fc7dcb6c294d893a12cdb10309b3d7ccc0a50866927ed7f9cb9bcda"} err="failed to get container status \"4c51a7a81fc7dcb6c294d893a12cdb10309b3d7ccc0a50866927ed7f9cb9bcda\": rpc error: code = NotFound desc = could not find container \"4c51a7a81fc7dcb6c294d893a12cdb10309b3d7ccc0a50866927ed7f9cb9bcda\": container with ID starting with 4c51a7a81fc7dcb6c294d893a12cdb10309b3d7ccc0a50866927ed7f9cb9bcda not found: ID does not exist" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.766511 4798 scope.go:117] "RemoveContainer" containerID="8d3a8319f32aaff19534f7466610f33bd3cf31be3908af2b72126a67caf4ea6e" Oct 11 04:08:23 crc kubenswrapper[4798]: E1011 04:08:23.766865 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d3a8319f32aaff19534f7466610f33bd3cf31be3908af2b72126a67caf4ea6e\": container with ID starting with 8d3a8319f32aaff19534f7466610f33bd3cf31be3908af2b72126a67caf4ea6e not found: ID does not exist" containerID="8d3a8319f32aaff19534f7466610f33bd3cf31be3908af2b72126a67caf4ea6e" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.766894 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d3a8319f32aaff19534f7466610f33bd3cf31be3908af2b72126a67caf4ea6e"} err="failed to get container status \"8d3a8319f32aaff19534f7466610f33bd3cf31be3908af2b72126a67caf4ea6e\": rpc error: code = NotFound desc = could not find container \"8d3a8319f32aaff19534f7466610f33bd3cf31be3908af2b72126a67caf4ea6e\": container with ID starting with 8d3a8319f32aaff19534f7466610f33bd3cf31be3908af2b72126a67caf4ea6e not found: ID does not exist" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.877703 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6abc8fc3-977c-42dd-9385-f86d5ff3878a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6abc8fc3-977c-42dd-9385-f86d5ff3878a" (UID: "6abc8fc3-977c-42dd-9385-f86d5ff3878a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:08:23 crc kubenswrapper[4798]: I1011 04:08:23.970661 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6abc8fc3-977c-42dd-9385-f86d5ff3878a-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:08:24 crc kubenswrapper[4798]: I1011 04:08:24.019321 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7w2w4"] Oct 11 04:08:24 crc kubenswrapper[4798]: I1011 04:08:24.024900 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7w2w4"] Oct 11 04:08:24 crc kubenswrapper[4798]: I1011 04:08:24.681314 4798 generic.go:334] "Generic (PLEG): container finished" podID="525c5514-1646-41bf-bda9-04cdd47ff203" containerID="d9236002e37cd6ef9db4c7e657e50e3ebaebc93d858723f8d37e0103b61c2e0a" exitCode=0 Oct 11 04:08:24 crc kubenswrapper[4798]: I1011 04:08:24.681419 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w7bgq" event={"ID":"525c5514-1646-41bf-bda9-04cdd47ff203","Type":"ContainerDied","Data":"d9236002e37cd6ef9db4c7e657e50e3ebaebc93d858723f8d37e0103b61c2e0a"} Oct 11 04:08:24 crc kubenswrapper[4798]: I1011 04:08:24.681768 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w7bgq" event={"ID":"525c5514-1646-41bf-bda9-04cdd47ff203","Type":"ContainerStarted","Data":"58f76a7d1e211328d1d535b8b23e1e6d35f1c2aed4911489c05a036aabac57b7"} Oct 11 04:08:25 crc kubenswrapper[4798]: I1011 04:08:25.433324 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6abc8fc3-977c-42dd-9385-f86d5ff3878a" path="/var/lib/kubelet/pods/6abc8fc3-977c-42dd-9385-f86d5ff3878a/volumes" Oct 11 04:08:25 crc kubenswrapper[4798]: I1011 04:08:25.690998 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w7bgq" event={"ID":"525c5514-1646-41bf-bda9-04cdd47ff203","Type":"ContainerStarted","Data":"5cedbfed0b54d754af83af63d1aa74d559b19c0ba4b8074e21df42a7d73b61e6"} Oct 11 04:08:25 crc kubenswrapper[4798]: E1011 04:08:25.952168 4798 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod525c5514_1646_41bf_bda9_04cdd47ff203.slice/crio-conmon-5cedbfed0b54d754af83af63d1aa74d559b19c0ba4b8074e21df42a7d73b61e6.scope\": RecentStats: unable to find data in memory cache]" Oct 11 04:08:26 crc kubenswrapper[4798]: I1011 04:08:26.699782 4798 generic.go:334] "Generic (PLEG): container finished" podID="525c5514-1646-41bf-bda9-04cdd47ff203" containerID="5cedbfed0b54d754af83af63d1aa74d559b19c0ba4b8074e21df42a7d73b61e6" exitCode=0 Oct 11 04:08:26 crc kubenswrapper[4798]: I1011 04:08:26.699833 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w7bgq" event={"ID":"525c5514-1646-41bf-bda9-04cdd47ff203","Type":"ContainerDied","Data":"5cedbfed0b54d754af83af63d1aa74d559b19c0ba4b8074e21df42a7d73b61e6"} Oct 11 04:08:28 crc kubenswrapper[4798]: I1011 04:08:28.714062 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w7bgq" event={"ID":"525c5514-1646-41bf-bda9-04cdd47ff203","Type":"ContainerStarted","Data":"f7e7df1d6ec722d40cdd26dbe0c0b9d6e95d50697ca8680eab029b166782252d"} Oct 11 04:08:28 crc kubenswrapper[4798]: I1011 04:08:28.758816 4798 
Oct 11 04:08:33 crc kubenswrapper[4798]: I1011 04:08:33.438425 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-w7bgq"
Oct 11 04:08:33 crc kubenswrapper[4798]: I1011 04:08:33.438835 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-w7bgq"
Oct 11 04:08:33 crc kubenswrapper[4798]: I1011 04:08:33.482038 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-w7bgq"
Oct 11 04:08:33 crc kubenswrapper[4798]: I1011 04:08:33.799051 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-w7bgq"
Oct 11 04:08:33 crc kubenswrapper[4798]: I1011 04:08:33.857512 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-w7bgq"]
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.758506 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-w7bgq" podUID="525c5514-1646-41bf-bda9-04cdd47ff203" containerName="registry-server" containerID="cri-o://f7e7df1d6ec722d40cdd26dbe0c0b9d6e95d50697ca8680eab029b166782252d" gracePeriod=2
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.921331 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn"]
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.938711 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn"]
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.938849 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn"
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.942658 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-h9gn7"
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.946485 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk"]
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.948104 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk"
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.950179 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-dwftv"
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.951288 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk"]
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.959417 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77"]
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.961087 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77"
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.964643 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-d8cz2"
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.991564 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f"]
Oct 11 04:08:35 crc kubenswrapper[4798]: I1011 04:08:35.993836 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f"
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.000367 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-j658k"
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.006993 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77"]
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.019950 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f"]
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.059931 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5r67\" (UniqueName: \"kubernetes.io/projected/755c871f-8030-429f-9b9f-5b1a6c1e24e0-kube-api-access-w5r67\") pod \"cinder-operator-controller-manager-59cdc64769-9cgvk\" (UID: \"755c871f-8030-429f-9b9f-5b1a6c1e24e0\") " pod="openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk"
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.059995 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnxp9\" (UniqueName: \"kubernetes.io/projected/7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7-kube-api-access-pnxp9\") pod \"barbican-operator-controller-manager-64f84fcdbb-pvqqn\" (UID: \"7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7\") " pod="openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn"
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.090742 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb"]
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.093112 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb"
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.097259 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-mlnl4"
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.113218 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r"]
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.120822 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r"
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.128319 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-t2tg2"
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.162450 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5r67\" (UniqueName: \"kubernetes.io/projected/755c871f-8030-429f-9b9f-5b1a6c1e24e0-kube-api-access-w5r67\") pod \"cinder-operator-controller-manager-59cdc64769-9cgvk\" (UID: \"755c871f-8030-429f-9b9f-5b1a6c1e24e0\") " pod="openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk"
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.162514 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnxp9\" (UniqueName: \"kubernetes.io/projected/7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7-kube-api-access-pnxp9\") pod \"barbican-operator-controller-manager-64f84fcdbb-pvqqn\" (UID: \"7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7\") " pod="openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn"
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.162541 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w24b6\" (UniqueName: \"kubernetes.io/projected/4490c7b3-a500-4224-8597-abd71de4fa13-kube-api-access-w24b6\") pod \"glance-operator-controller-manager-7f5fc6b5ff-4zk9f\" (UID: \"4490c7b3-a500-4224-8597-abd71de4fa13\") " pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f"
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.162586 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9q9z\" (UniqueName: \"kubernetes.io/projected/2c18a2f7-9a42-495c-bc9c-86750a381f5c-kube-api-access-n9q9z\") pod \"designate-operator-controller-manager-687df44cdb-x5n77\" (UID: \"2c18a2f7-9a42-495c-bc9c-86750a381f5c\") " pod="openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77"
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.207560 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5r67\" (UniqueName: \"kubernetes.io/projected/755c871f-8030-429f-9b9f-5b1a6c1e24e0-kube-api-access-w5r67\") pod \"cinder-operator-controller-manager-59cdc64769-9cgvk\" (UID: \"755c871f-8030-429f-9b9f-5b1a6c1e24e0\") " pod="openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk"
Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.213799 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnxp9\" (UniqueName: \"kubernetes.io/projected/7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7-kube-api-access-pnxp9\") pod \"barbican-operator-controller-manager-64f84fcdbb-pvqqn\" (UID: \"7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7\") " pod="openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn"
\"barbican-operator-controller-manager-64f84fcdbb-pvqqn\" (UID: \"7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7\") " pod="openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.214533 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.233646 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.250841 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.252286 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.263024 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-jtpnt" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.263598 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.265081 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w24b6\" (UniqueName: \"kubernetes.io/projected/4490c7b3-a500-4224-8597-abd71de4fa13-kube-api-access-w24b6\") pod \"glance-operator-controller-manager-7f5fc6b5ff-4zk9f\" (UID: \"4490c7b3-a500-4224-8597-abd71de4fa13\") " pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.266755 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b86b7\" (UniqueName: \"kubernetes.io/projected/cc1b32c4-acec-47f4-95ce-88763c33ca81-kube-api-access-b86b7\") pod \"heat-operator-controller-manager-6d9967f8dd-lgszb\" (UID: \"cc1b32c4-acec-47f4-95ce-88763c33ca81\") " pod="openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.266901 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k544d\" (UniqueName: \"kubernetes.io/projected/332f7594-84b2-4761-a067-a32c31469e4f-kube-api-access-k544d\") pod \"horizon-operator-controller-manager-6d74794d9b-vpg7r\" (UID: \"332f7594-84b2-4761-a067-a32c31469e4f\") " pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.267005 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9q9z\" (UniqueName: \"kubernetes.io/projected/2c18a2f7-9a42-495c-bc9c-86750a381f5c-kube-api-access-n9q9z\") pod \"designate-operator-controller-manager-687df44cdb-x5n77\" (UID: \"2c18a2f7-9a42-495c-bc9c-86750a381f5c\") " pod="openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.267308 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwstx\" (UniqueName: \"kubernetes.io/projected/01834c42-7ee0-4576-9136-5812fc37e1cc-kube-api-access-rwstx\") pod 
\"infra-operator-controller-manager-585fc5b659-zsdqq\" (UID: \"01834c42-7ee0-4576-9136-5812fc37e1cc\") " pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.267443 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/01834c42-7ee0-4576-9136-5812fc37e1cc-cert\") pod \"infra-operator-controller-manager-585fc5b659-zsdqq\" (UID: \"01834c42-7ee0-4576-9136-5812fc37e1cc\") " pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.272790 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.288511 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.296273 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.321598 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.327361 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w24b6\" (UniqueName: \"kubernetes.io/projected/4490c7b3-a500-4224-8597-abd71de4fa13-kube-api-access-w24b6\") pod \"glance-operator-controller-manager-7f5fc6b5ff-4zk9f\" (UID: \"4490c7b3-a500-4224-8597-abd71de4fa13\") " pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.342504 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.342873 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.344150 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.344596 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.346473 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-fbj7t" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.346666 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-wwck5" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.356791 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.357921 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.360374 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.361578 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.363767 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9q9z\" (UniqueName: \"kubernetes.io/projected/2c18a2f7-9a42-495c-bc9c-86750a381f5c-kube-api-access-n9q9z\") pod \"designate-operator-controller-manager-687df44cdb-x5n77\" (UID: \"2c18a2f7-9a42-495c-bc9c-86750a381f5c\") " pod="openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.363812 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-52mmp" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.370648 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b86b7\" (UniqueName: \"kubernetes.io/projected/cc1b32c4-acec-47f4-95ce-88763c33ca81-kube-api-access-b86b7\") pod \"heat-operator-controller-manager-6d9967f8dd-lgszb\" (UID: \"cc1b32c4-acec-47f4-95ce-88763c33ca81\") " pod="openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.370707 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6sj5f\" (UniqueName: \"kubernetes.io/projected/a168d198-c58b-4674-8286-72fb306036b2-kube-api-access-6sj5f\") pod \"ironic-operator-controller-manager-74cb5cbc49-p96zk\" (UID: \"a168d198-c58b-4674-8286-72fb306036b2\") " pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.370749 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k544d\" (UniqueName: \"kubernetes.io/projected/332f7594-84b2-4761-a067-a32c31469e4f-kube-api-access-k544d\") pod \"horizon-operator-controller-manager-6d74794d9b-vpg7r\" (UID: \"332f7594-84b2-4761-a067-a32c31469e4f\") " pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.370794 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwstx\" (UniqueName: \"kubernetes.io/projected/01834c42-7ee0-4576-9136-5812fc37e1cc-kube-api-access-rwstx\") pod \"infra-operator-controller-manager-585fc5b659-zsdqq\" (UID: \"01834c42-7ee0-4576-9136-5812fc37e1cc\") " pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.370817 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/01834c42-7ee0-4576-9136-5812fc37e1cc-cert\") pod \"infra-operator-controller-manager-585fc5b659-zsdqq\" (UID: \"01834c42-7ee0-4576-9136-5812fc37e1cc\") " pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.370870 4798 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxqvp\" (UniqueName: \"kubernetes.io/projected/2fe367a2-f6f5-47ea-97a5-75a8b79778fb-kube-api-access-gxqvp\") pod \"keystone-operator-controller-manager-ddb98f99b-npmgn\" (UID: \"2fe367a2-f6f5-47ea-97a5-75a8b79778fb\") " pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.370898 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9k2k6\" (UniqueName: \"kubernetes.io/projected/2448a4a6-cf4c-45ec-89cc-25621f444988-kube-api-access-9k2k6\") pod \"manila-operator-controller-manager-59578bc799-7kfp8\" (UID: \"2448a4a6-cf4c-45ec-89cc-25621f444988\") " pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.377378 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-m7ldp"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.378113 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/01834c42-7ee0-4576-9136-5812fc37e1cc-cert\") pod \"infra-operator-controller-manager-585fc5b659-zsdqq\" (UID: \"01834c42-7ee0-4576-9136-5812fc37e1cc\") " pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.379236 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m7ldp" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.387524 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.388939 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.391464 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.392696 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-djqqq" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.407093 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b86b7\" (UniqueName: \"kubernetes.io/projected/cc1b32c4-acec-47f4-95ce-88763c33ca81-kube-api-access-b86b7\") pod \"heat-operator-controller-manager-6d9967f8dd-lgszb\" (UID: \"cc1b32c4-acec-47f4-95ce-88763c33ca81\") " pod="openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.410945 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k544d\" (UniqueName: \"kubernetes.io/projected/332f7594-84b2-4761-a067-a32c31469e4f-kube-api-access-k544d\") pod \"horizon-operator-controller-manager-6d74794d9b-vpg7r\" (UID: \"332f7594-84b2-4761-a067-a32c31469e4f\") " pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.416962 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwstx\" (UniqueName: \"kubernetes.io/projected/01834c42-7ee0-4576-9136-5812fc37e1cc-kube-api-access-rwstx\") pod \"infra-operator-controller-manager-585fc5b659-zsdqq\" (UID: \"01834c42-7ee0-4576-9136-5812fc37e1cc\") " pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.433472 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.464725 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.468276 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.474132 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbm8m\" (UniqueName: \"kubernetes.io/projected/48b2c137-9726-4e99-b0ca-a3cdb3bc6a1b-kube-api-access-dbm8m\") pod \"neutron-operator-controller-manager-797d478b46-rwhzb\" (UID: \"48b2c137-9726-4e99-b0ca-a3cdb3bc6a1b\") " pod="openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.474185 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-utilities\") pod \"redhat-operators-m7ldp\" (UID: \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\") " pod="openshift-marketplace/redhat-operators-m7ldp" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.474230 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6sj5f\" (UniqueName: \"kubernetes.io/projected/a168d198-c58b-4674-8286-72fb306036b2-kube-api-access-6sj5f\") pod \"ironic-operator-controller-manager-74cb5cbc49-p96zk\" (UID: \"a168d198-c58b-4674-8286-72fb306036b2\") " pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.474278 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4qbm\" (UniqueName: \"kubernetes.io/projected/e9fb9720-fc76-47a8-b111-63b5bc2899da-kube-api-access-d4qbm\") pod \"mariadb-operator-controller-manager-5777b4f897-l86k6\" (UID: \"e9fb9720-fc76-47a8-b111-63b5bc2899da\") " pod="openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.474297 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-catalog-content\") pod \"redhat-operators-m7ldp\" (UID: \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\") " pod="openshift-marketplace/redhat-operators-m7ldp" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.474355 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxqvp\" (UniqueName: \"kubernetes.io/projected/2fe367a2-f6f5-47ea-97a5-75a8b79778fb-kube-api-access-gxqvp\") pod \"keystone-operator-controller-manager-ddb98f99b-npmgn\" (UID: \"2fe367a2-f6f5-47ea-97a5-75a8b79778fb\") " pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.474376 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9k2k6\" (UniqueName: \"kubernetes.io/projected/2448a4a6-cf4c-45ec-89cc-25621f444988-kube-api-access-9k2k6\") pod \"manila-operator-controller-manager-59578bc799-7kfp8\" (UID: \"2448a4a6-cf4c-45ec-89cc-25621f444988\") " pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.474420 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pb5z\" (UniqueName: \"kubernetes.io/projected/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-kube-api-access-8pb5z\") pod 
\"redhat-operators-m7ldp\" (UID: \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\") " pod="openshift-marketplace/redhat-operators-m7ldp" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.475828 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-m7ldp"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.478134 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-6cvd6" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.483466 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.487379 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.491244 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.492633 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.503018 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.514000 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9k2k6\" (UniqueName: \"kubernetes.io/projected/2448a4a6-cf4c-45ec-89cc-25621f444988-kube-api-access-9k2k6\") pod \"manila-operator-controller-manager-59578bc799-7kfp8\" (UID: \"2448a4a6-cf4c-45ec-89cc-25621f444988\") " pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.519073 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6sj5f\" (UniqueName: \"kubernetes.io/projected/a168d198-c58b-4674-8286-72fb306036b2-kube-api-access-6sj5f\") pod \"ironic-operator-controller-manager-74cb5cbc49-p96zk\" (UID: \"a168d198-c58b-4674-8286-72fb306036b2\") " pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.523265 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxqvp\" (UniqueName: \"kubernetes.io/projected/2fe367a2-f6f5-47ea-97a5-75a8b79778fb-kube-api-access-gxqvp\") pod \"keystone-operator-controller-manager-ddb98f99b-npmgn\" (UID: \"2fe367a2-f6f5-47ea-97a5-75a8b79778fb\") " pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.523847 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.523992 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.530980 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-6nfpq" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.531240 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-qd85q" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.548320 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.549796 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.556549 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-vz4n9" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.559868 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.560550 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w7bgq" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.560844 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="525c5514-1646-41bf-bda9-04cdd47ff203" containerName="registry-server" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.561295 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.573338 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.573694 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-trw2c" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.573870 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.575368 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4qbm\" (UniqueName: \"kubernetes.io/projected/e9fb9720-fc76-47a8-b111-63b5bc2899da-kube-api-access-d4qbm\") pod \"mariadb-operator-controller-manager-5777b4f897-l86k6\" (UID: \"e9fb9720-fc76-47a8-b111-63b5bc2899da\") " pod="openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.575413 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-catalog-content\") pod \"redhat-operators-m7ldp\" (UID: \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\") " pod="openshift-marketplace/redhat-operators-m7ldp" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.575459 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pb5z\" (UniqueName: \"kubernetes.io/projected/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-kube-api-access-8pb5z\") pod \"redhat-operators-m7ldp\" (UID: \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\") " pod="openshift-marketplace/redhat-operators-m7ldp" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.575480 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbm8m\" (UniqueName: \"kubernetes.io/projected/48b2c137-9726-4e99-b0ca-a3cdb3bc6a1b-kube-api-access-dbm8m\") pod \"neutron-operator-controller-manager-797d478b46-rwhzb\" (UID: \"48b2c137-9726-4e99-b0ca-a3cdb3bc6a1b\") " pod="openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.575497 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-utilities\") pod \"redhat-operators-m7ldp\" (UID: \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\") " pod="openshift-marketplace/redhat-operators-m7ldp" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.576023 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-utilities\") pod \"redhat-operators-m7ldp\" (UID: \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\") " pod="openshift-marketplace/redhat-operators-m7ldp" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.576522 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-catalog-content\") pod \"redhat-operators-m7ldp\" (UID: \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\") " 
pod="openshift-marketplace/redhat-operators-m7ldp" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.579675 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.579737 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p"] Oct 11 04:08:36 crc kubenswrapper[4798]: E1011 04:08:36.583155 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="525c5514-1646-41bf-bda9-04cdd47ff203" containerName="extract-utilities" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.583190 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="525c5514-1646-41bf-bda9-04cdd47ff203" containerName="extract-utilities" Oct 11 04:08:36 crc kubenswrapper[4798]: E1011 04:08:36.583201 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="525c5514-1646-41bf-bda9-04cdd47ff203" containerName="registry-server" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.583207 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="525c5514-1646-41bf-bda9-04cdd47ff203" containerName="registry-server" Oct 11 04:08:36 crc kubenswrapper[4798]: E1011 04:08:36.583227 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="525c5514-1646-41bf-bda9-04cdd47ff203" containerName="extract-content" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.583233 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="525c5514-1646-41bf-bda9-04cdd47ff203" containerName="extract-content" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.584868 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.587560 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-g9wkl" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.599231 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.615040 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.658366 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pb5z\" (UniqueName: \"kubernetes.io/projected/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-kube-api-access-8pb5z\") pod \"redhat-operators-m7ldp\" (UID: \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\") " pod="openshift-marketplace/redhat-operators-m7ldp" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.658846 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4qbm\" (UniqueName: \"kubernetes.io/projected/e9fb9720-fc76-47a8-b111-63b5bc2899da-kube-api-access-d4qbm\") pod \"mariadb-operator-controller-manager-5777b4f897-l86k6\" (UID: \"e9fb9720-fc76-47a8-b111-63b5bc2899da\") " pod="openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.658840 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.659622 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbm8m\" (UniqueName: \"kubernetes.io/projected/48b2c137-9726-4e99-b0ca-a3cdb3bc6a1b-kube-api-access-dbm8m\") pod \"neutron-operator-controller-manager-797d478b46-rwhzb\" (UID: \"48b2c137-9726-4e99-b0ca-a3cdb3bc6a1b\") " pod="openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.712994 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.713675 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmpgh\" (UniqueName: \"kubernetes.io/projected/525c5514-1646-41bf-bda9-04cdd47ff203-kube-api-access-xmpgh\") pod \"525c5514-1646-41bf-bda9-04cdd47ff203\" (UID: \"525c5514-1646-41bf-bda9-04cdd47ff203\") " Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.713753 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/525c5514-1646-41bf-bda9-04cdd47ff203-utilities\") pod \"525c5514-1646-41bf-bda9-04cdd47ff203\" (UID: \"525c5514-1646-41bf-bda9-04cdd47ff203\") " Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.713914 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/525c5514-1646-41bf-bda9-04cdd47ff203-catalog-content\") pod \"525c5514-1646-41bf-bda9-04cdd47ff203\" (UID: \"525c5514-1646-41bf-bda9-04cdd47ff203\") " Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.714561 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x65mv\" (UniqueName: \"kubernetes.io/projected/7f1693fd-2ec7-4047-a2df-ffa5d7c94e67-kube-api-access-x65mv\") pod \"nova-operator-controller-manager-57bb74c7bf-6rfxm\" (UID: \"7f1693fd-2ec7-4047-a2df-ffa5d7c94e67\") " pod="openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.714630 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkh5x\" (UniqueName: \"kubernetes.io/projected/9b7c0cf6-604f-43aa-ad27-0a2f54507751-kube-api-access-dkh5x\") pod \"ovn-operator-controller-manager-869cc7797f-t29x4\" (UID: \"9b7c0cf6-604f-43aa-ad27-0a2f54507751\") " pod="openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.714679 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qx868\" (UniqueName: \"kubernetes.io/projected/d7c1b935-ea3d-4d7b-96ab-5eb98fbaaea8-kube-api-access-qx868\") pod \"placement-operator-controller-manager-664664cb68-jkj5p\" (UID: \"d7c1b935-ea3d-4d7b-96ab-5eb98fbaaea8\") " pod="openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.714716 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6k6f\" (UniqueName: \"kubernetes.io/projected/60303330-3699-43be-ba28-96cd788e1cf0-kube-api-access-s6k6f\") pod 
\"openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4\" (UID: \"60303330-3699-43be-ba28-96cd788e1cf0\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.714734 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.714778 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/60303330-3699-43be-ba28-96cd788e1cf0-cert\") pod \"openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4\" (UID: \"60303330-3699-43be-ba28-96cd788e1cf0\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.714842 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vc5j\" (UniqueName: \"kubernetes.io/projected/13f1e30f-4d2b-4517-86ae-8dc8334b0841-kube-api-access-4vc5j\") pod \"octavia-operator-controller-manager-6d7c7ddf95-l9z67\" (UID: \"13f1e30f-4d2b-4517-86ae-8dc8334b0841\") " pod="openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.720485 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/525c5514-1646-41bf-bda9-04cdd47ff203-utilities" (OuterVolumeSpecName: "utilities") pod "525c5514-1646-41bf-bda9-04cdd47ff203" (UID: "525c5514-1646-41bf-bda9-04cdd47ff203"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.728264 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.746746 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.749671 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.752132 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.757251 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-2k7hx" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.768719 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.777442 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.779031 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.782856 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.785806 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-2brjv" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.790050 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.790364 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/525c5514-1646-41bf-bda9-04cdd47ff203-kube-api-access-xmpgh" (OuterVolumeSpecName: "kube-api-access-xmpgh") pod "525c5514-1646-41bf-bda9-04cdd47ff203" (UID: "525c5514-1646-41bf-bda9-04cdd47ff203"). InnerVolumeSpecName "kube-api-access-xmpgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.794717 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.816084 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/60303330-3699-43be-ba28-96cd788e1cf0-cert\") pod \"openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4\" (UID: \"60303330-3699-43be-ba28-96cd788e1cf0\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.816138 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vc5j\" (UniqueName: \"kubernetes.io/projected/13f1e30f-4d2b-4517-86ae-8dc8334b0841-kube-api-access-4vc5j\") pod \"octavia-operator-controller-manager-6d7c7ddf95-l9z67\" (UID: \"13f1e30f-4d2b-4517-86ae-8dc8334b0841\") " pod="openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.816204 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x65mv\" (UniqueName: \"kubernetes.io/projected/7f1693fd-2ec7-4047-a2df-ffa5d7c94e67-kube-api-access-x65mv\") pod \"nova-operator-controller-manager-57bb74c7bf-6rfxm\" (UID: \"7f1693fd-2ec7-4047-a2df-ffa5d7c94e67\") " pod="openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.816232 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkh5x\" (UniqueName: \"kubernetes.io/projected/9b7c0cf6-604f-43aa-ad27-0a2f54507751-kube-api-access-dkh5x\") pod \"ovn-operator-controller-manager-869cc7797f-t29x4\" (UID: \"9b7c0cf6-604f-43aa-ad27-0a2f54507751\") " pod="openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.816249 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qx868\" (UniqueName: \"kubernetes.io/projected/d7c1b935-ea3d-4d7b-96ab-5eb98fbaaea8-kube-api-access-qx868\") pod \"placement-operator-controller-manager-664664cb68-jkj5p\" (UID: 
\"d7c1b935-ea3d-4d7b-96ab-5eb98fbaaea8\") " pod="openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.816275 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6k6f\" (UniqueName: \"kubernetes.io/projected/60303330-3699-43be-ba28-96cd788e1cf0-kube-api-access-s6k6f\") pod \"openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4\" (UID: \"60303330-3699-43be-ba28-96cd788e1cf0\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.816322 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmpgh\" (UniqueName: \"kubernetes.io/projected/525c5514-1646-41bf-bda9-04cdd47ff203-kube-api-access-xmpgh\") on node \"crc\" DevicePath \"\"" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.816334 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/525c5514-1646-41bf-bda9-04cdd47ff203-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:08:36 crc kubenswrapper[4798]: E1011 04:08:36.816721 4798 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Oct 11 04:08:36 crc kubenswrapper[4798]: E1011 04:08:36.816772 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/60303330-3699-43be-ba28-96cd788e1cf0-cert podName:60303330-3699-43be-ba28-96cd788e1cf0 nodeName:}" failed. No retries permitted until 2025-10-11 04:08:37.316751356 +0000 UTC m=+812.653041042 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/60303330-3699-43be-ba28-96cd788e1cf0-cert") pod "openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" (UID: "60303330-3699-43be-ba28-96cd788e1cf0") : secret "openstack-baremetal-operator-webhook-server-cert" not found Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.829170 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.830295 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.834645 4798 generic.go:334] "Generic (PLEG): container finished" podID="525c5514-1646-41bf-bda9-04cdd47ff203" containerID="f7e7df1d6ec722d40cdd26dbe0c0b9d6e95d50697ca8680eab029b166782252d" exitCode=0 Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.834691 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w7bgq" event={"ID":"525c5514-1646-41bf-bda9-04cdd47ff203","Type":"ContainerDied","Data":"f7e7df1d6ec722d40cdd26dbe0c0b9d6e95d50697ca8680eab029b166782252d"} Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.834723 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w7bgq" event={"ID":"525c5514-1646-41bf-bda9-04cdd47ff203","Type":"ContainerDied","Data":"58f76a7d1e211328d1d535b8b23e1e6d35f1c2aed4911489c05a036aabac57b7"} Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.834743 4798 scope.go:117] "RemoveContainer" containerID="f7e7df1d6ec722d40cdd26dbe0c0b9d6e95d50697ca8680eab029b166782252d" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.834907 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w7bgq" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.835357 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m7ldp" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.843573 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.850976 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-vwf7j" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.853445 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/525c5514-1646-41bf-bda9-04cdd47ff203-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "525c5514-1646-41bf-bda9-04cdd47ff203" (UID: "525c5514-1646-41bf-bda9-04cdd47ff203"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.887175 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qx868\" (UniqueName: \"kubernetes.io/projected/d7c1b935-ea3d-4d7b-96ab-5eb98fbaaea8-kube-api-access-qx868\") pod \"placement-operator-controller-manager-664664cb68-jkj5p\" (UID: \"d7c1b935-ea3d-4d7b-96ab-5eb98fbaaea8\") " pod="openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.894908 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-646675d848-68hdd"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.902257 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.903553 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.905957 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x65mv\" (UniqueName: \"kubernetes.io/projected/7f1693fd-2ec7-4047-a2df-ffa5d7c94e67-kube-api-access-x65mv\") pod \"nova-operator-controller-manager-57bb74c7bf-6rfxm\" (UID: \"7f1693fd-2ec7-4047-a2df-ffa5d7c94e67\") " pod="openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.908558 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-bw7np" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.911482 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkh5x\" (UniqueName: \"kubernetes.io/projected/9b7c0cf6-604f-43aa-ad27-0a2f54507751-kube-api-access-dkh5x\") pod \"ovn-operator-controller-manager-869cc7797f-t29x4\" (UID: \"9b7c0cf6-604f-43aa-ad27-0a2f54507751\") " pod="openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.914730 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.916457 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6k6f\" (UniqueName: \"kubernetes.io/projected/60303330-3699-43be-ba28-96cd788e1cf0-kube-api-access-s6k6f\") pod \"openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4\" (UID: \"60303330-3699-43be-ba28-96cd788e1cf0\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.917266 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7fvl\" (UniqueName: \"kubernetes.io/projected/f0952c0e-9c58-446c-90eb-61c0f8f4d64a-kube-api-access-b7fvl\") pod \"swift-operator-controller-manager-5f4d5dfdc6-fffgl\" (UID: \"f0952c0e-9c58-446c-90eb-61c0f8f4d64a\") " pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.917312 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgjcr\" (UniqueName: \"kubernetes.io/projected/0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94-kube-api-access-jgjcr\") pod \"telemetry-operator-controller-manager-578874c84d-zg2f8\" (UID: \"0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94\") " pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.917369 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/525c5514-1646-41bf-bda9-04cdd47ff203-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.917436 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-646675d848-68hdd"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.955284 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vc5j\" (UniqueName: 
\"kubernetes.io/projected/13f1e30f-4d2b-4517-86ae-8dc8334b0841-kube-api-access-4vc5j\") pod \"octavia-operator-controller-manager-6d7c7ddf95-l9z67\" (UID: \"13f1e30f-4d2b-4517-86ae-8dc8334b0841\") " pod="openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.983249 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt"] Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.988655 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.993308 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-kgwcb" Oct 11 04:08:36 crc kubenswrapper[4798]: I1011 04:08:36.993575 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.010017 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt"] Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.022332 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kn4l\" (UniqueName: \"kubernetes.io/projected/2c332094-6f91-4065-829a-736e42bd6560-kube-api-access-7kn4l\") pod \"test-operator-controller-manager-ffcdd6c94-4t4ft\" (UID: \"2c332094-6f91-4065-829a-736e42bd6560\") " pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.022409 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7fvl\" (UniqueName: \"kubernetes.io/projected/f0952c0e-9c58-446c-90eb-61c0f8f4d64a-kube-api-access-b7fvl\") pod \"swift-operator-controller-manager-5f4d5dfdc6-fffgl\" (UID: \"f0952c0e-9c58-446c-90eb-61c0f8f4d64a\") " pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.022442 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt44t\" (UniqueName: \"kubernetes.io/projected/eb92bfae-ca3f-4e70-9fa1-440a6154cb53-kube-api-access-qt44t\") pod \"watcher-operator-controller-manager-646675d848-68hdd\" (UID: \"eb92bfae-ca3f-4e70-9fa1-440a6154cb53\") " pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.022463 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgjcr\" (UniqueName: \"kubernetes.io/projected/0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94-kube-api-access-jgjcr\") pod \"telemetry-operator-controller-manager-578874c84d-zg2f8\" (UID: \"0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94\") " pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.037802 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4"] Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.038992 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.041176 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-xt4r6" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.062466 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgjcr\" (UniqueName: \"kubernetes.io/projected/0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94-kube-api-access-jgjcr\") pod \"telemetry-operator-controller-manager-578874c84d-zg2f8\" (UID: \"0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94\") " pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.062724 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7fvl\" (UniqueName: \"kubernetes.io/projected/f0952c0e-9c58-446c-90eb-61c0f8f4d64a-kube-api-access-b7fvl\") pod \"swift-operator-controller-manager-5f4d5dfdc6-fffgl\" (UID: \"f0952c0e-9c58-446c-90eb-61c0f8f4d64a\") " pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.067038 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4"] Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.111505 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.129438 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.129437 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt44t\" (UniqueName: \"kubernetes.io/projected/eb92bfae-ca3f-4e70-9fa1-440a6154cb53-kube-api-access-qt44t\") pod \"watcher-operator-controller-manager-646675d848-68hdd\" (UID: \"eb92bfae-ca3f-4e70-9fa1-440a6154cb53\") " pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.132231 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbg87\" (UniqueName: \"kubernetes.io/projected/a368474b-c03b-43b2-96af-0bb78d9f6ee6-kube-api-access-jbg87\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4\" (UID: \"a368474b-c03b-43b2-96af-0bb78d9f6ee6\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.132287 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kn4l\" (UniqueName: \"kubernetes.io/projected/2c332094-6f91-4065-829a-736e42bd6560-kube-api-access-7kn4l\") pod \"test-operator-controller-manager-ffcdd6c94-4t4ft\" (UID: \"2c332094-6f91-4065-829a-736e42bd6560\") " pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.132325 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a0f0b4cf-9851-4d84-81e0-7130b9777793-cert\") pod 
\"openstack-operator-controller-manager-7d8c4595d6-d6fzt\" (UID: \"a0f0b4cf-9851-4d84-81e0-7130b9777793\") " pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.132415 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n942s\" (UniqueName: \"kubernetes.io/projected/a0f0b4cf-9851-4d84-81e0-7130b9777793-kube-api-access-n942s\") pod \"openstack-operator-controller-manager-7d8c4595d6-d6fzt\" (UID: \"a0f0b4cf-9851-4d84-81e0-7130b9777793\") " pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.160480 4798 scope.go:117] "RemoveContainer" containerID="5cedbfed0b54d754af83af63d1aa74d559b19c0ba4b8074e21df42a7d73b61e6" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.165082 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kn4l\" (UniqueName: \"kubernetes.io/projected/2c332094-6f91-4065-829a-736e42bd6560-kube-api-access-7kn4l\") pod \"test-operator-controller-manager-ffcdd6c94-4t4ft\" (UID: \"2c332094-6f91-4065-829a-736e42bd6560\") " pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.165604 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qt44t\" (UniqueName: \"kubernetes.io/projected/eb92bfae-ca3f-4e70-9fa1-440a6154cb53-kube-api-access-qt44t\") pod \"watcher-operator-controller-manager-646675d848-68hdd\" (UID: \"eb92bfae-ca3f-4e70-9fa1-440a6154cb53\") " pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.235055 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n942s\" (UniqueName: \"kubernetes.io/projected/a0f0b4cf-9851-4d84-81e0-7130b9777793-kube-api-access-n942s\") pod \"openstack-operator-controller-manager-7d8c4595d6-d6fzt\" (UID: \"a0f0b4cf-9851-4d84-81e0-7130b9777793\") " pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.235189 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbg87\" (UniqueName: \"kubernetes.io/projected/a368474b-c03b-43b2-96af-0bb78d9f6ee6-kube-api-access-jbg87\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4\" (UID: \"a368474b-c03b-43b2-96af-0bb78d9f6ee6\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.235242 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a0f0b4cf-9851-4d84-81e0-7130b9777793-cert\") pod \"openstack-operator-controller-manager-7d8c4595d6-d6fzt\" (UID: \"a0f0b4cf-9851-4d84-81e0-7130b9777793\") " pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" Oct 11 04:08:37 crc kubenswrapper[4798]: E1011 04:08:37.235443 4798 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Oct 11 04:08:37 crc kubenswrapper[4798]: E1011 04:08:37.235513 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0f0b4cf-9851-4d84-81e0-7130b9777793-cert podName:a0f0b4cf-9851-4d84-81e0-7130b9777793 nodeName:}" failed. 
No retries permitted until 2025-10-11 04:08:37.735489187 +0000 UTC m=+813.071778873 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a0f0b4cf-9851-4d84-81e0-7130b9777793-cert") pod "openstack-operator-controller-manager-7d8c4595d6-d6fzt" (UID: "a0f0b4cf-9851-4d84-81e0-7130b9777793") : secret "webhook-server-cert" not found Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.243984 4798 scope.go:117] "RemoveContainer" containerID="d9236002e37cd6ef9db4c7e657e50e3ebaebc93d858723f8d37e0103b61c2e0a" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.258901 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n942s\" (UniqueName: \"kubernetes.io/projected/a0f0b4cf-9851-4d84-81e0-7130b9777793-kube-api-access-n942s\") pod \"openstack-operator-controller-manager-7d8c4595d6-d6fzt\" (UID: \"a0f0b4cf-9851-4d84-81e0-7130b9777793\") " pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.265846 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbg87\" (UniqueName: \"kubernetes.io/projected/a368474b-c03b-43b2-96af-0bb78d9f6ee6-kube-api-access-jbg87\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4\" (UID: \"a368474b-c03b-43b2-96af-0bb78d9f6ee6\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.299300 4798 scope.go:117] "RemoveContainer" containerID="f7e7df1d6ec722d40cdd26dbe0c0b9d6e95d50697ca8680eab029b166782252d" Oct 11 04:08:37 crc kubenswrapper[4798]: E1011 04:08:37.300100 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7e7df1d6ec722d40cdd26dbe0c0b9d6e95d50697ca8680eab029b166782252d\": container with ID starting with f7e7df1d6ec722d40cdd26dbe0c0b9d6e95d50697ca8680eab029b166782252d not found: ID does not exist" containerID="f7e7df1d6ec722d40cdd26dbe0c0b9d6e95d50697ca8680eab029b166782252d" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.300147 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7e7df1d6ec722d40cdd26dbe0c0b9d6e95d50697ca8680eab029b166782252d"} err="failed to get container status \"f7e7df1d6ec722d40cdd26dbe0c0b9d6e95d50697ca8680eab029b166782252d\": rpc error: code = NotFound desc = could not find container \"f7e7df1d6ec722d40cdd26dbe0c0b9d6e95d50697ca8680eab029b166782252d\": container with ID starting with f7e7df1d6ec722d40cdd26dbe0c0b9d6e95d50697ca8680eab029b166782252d not found: ID does not exist" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.300180 4798 scope.go:117] "RemoveContainer" containerID="5cedbfed0b54d754af83af63d1aa74d559b19c0ba4b8074e21df42a7d73b61e6" Oct 11 04:08:37 crc kubenswrapper[4798]: E1011 04:08:37.300613 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cedbfed0b54d754af83af63d1aa74d559b19c0ba4b8074e21df42a7d73b61e6\": container with ID starting with 5cedbfed0b54d754af83af63d1aa74d559b19c0ba4b8074e21df42a7d73b61e6 not found: ID does not exist" containerID="5cedbfed0b54d754af83af63d1aa74d559b19c0ba4b8074e21df42a7d73b61e6" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.300649 4798 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"5cedbfed0b54d754af83af63d1aa74d559b19c0ba4b8074e21df42a7d73b61e6"} err="failed to get container status \"5cedbfed0b54d754af83af63d1aa74d559b19c0ba4b8074e21df42a7d73b61e6\": rpc error: code = NotFound desc = could not find container \"5cedbfed0b54d754af83af63d1aa74d559b19c0ba4b8074e21df42a7d73b61e6\": container with ID starting with 5cedbfed0b54d754af83af63d1aa74d559b19c0ba4b8074e21df42a7d73b61e6 not found: ID does not exist" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.300672 4798 scope.go:117] "RemoveContainer" containerID="d9236002e37cd6ef9db4c7e657e50e3ebaebc93d858723f8d37e0103b61c2e0a" Oct 11 04:08:37 crc kubenswrapper[4798]: E1011 04:08:37.301000 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9236002e37cd6ef9db4c7e657e50e3ebaebc93d858723f8d37e0103b61c2e0a\": container with ID starting with d9236002e37cd6ef9db4c7e657e50e3ebaebc93d858723f8d37e0103b61c2e0a not found: ID does not exist" containerID="d9236002e37cd6ef9db4c7e657e50e3ebaebc93d858723f8d37e0103b61c2e0a" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.301046 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9236002e37cd6ef9db4c7e657e50e3ebaebc93d858723f8d37e0103b61c2e0a"} err="failed to get container status \"d9236002e37cd6ef9db4c7e657e50e3ebaebc93d858723f8d37e0103b61c2e0a\": rpc error: code = NotFound desc = could not find container \"d9236002e37cd6ef9db4c7e657e50e3ebaebc93d858723f8d37e0103b61c2e0a\": container with ID starting with d9236002e37cd6ef9db4c7e657e50e3ebaebc93d858723f8d37e0103b61c2e0a not found: ID does not exist" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.336297 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/60303330-3699-43be-ba28-96cd788e1cf0-cert\") pod \"openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4\" (UID: \"60303330-3699-43be-ba28-96cd788e1cf0\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.339960 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.344312 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/60303330-3699-43be-ba28-96cd788e1cf0-cert\") pod \"openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4\" (UID: \"60303330-3699-43be-ba28-96cd788e1cf0\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.355427 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.374048 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-w7bgq"] Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.376123 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.384710 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-w7bgq"] Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.401693 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.434340 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.466213 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="525c5514-1646-41bf-bda9-04cdd47ff203" path="/var/lib/kubelet/pods/525c5514-1646-41bf-bda9-04cdd47ff203/volumes" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.538476 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk"] Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.575373 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.601861 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.741896 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a0f0b4cf-9851-4d84-81e0-7130b9777793-cert\") pod \"openstack-operator-controller-manager-7d8c4595d6-d6fzt\" (UID: \"a0f0b4cf-9851-4d84-81e0-7130b9777793\") " pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.757408 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a0f0b4cf-9851-4d84-81e0-7130b9777793-cert\") pod \"openstack-operator-controller-manager-7d8c4595d6-d6fzt\" (UID: \"a0f0b4cf-9851-4d84-81e0-7130b9777793\") " pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.827175 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.842938 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk" event={"ID":"755c871f-8030-429f-9b9f-5b1a6c1e24e0","Type":"ContainerStarted","Data":"27e40f5df0392df5daedcfddda23a6addb880b22452bb488b940422c0bf4a15c"} Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.900439 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f"] Oct 11 04:08:37 crc kubenswrapper[4798]: I1011 04:08:37.917633 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn"] Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.224949 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-m7ldp"] Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.232217 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk"] Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.237811 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn"] Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.242831 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb"] Oct 11 04:08:38 crc kubenswrapper[4798]: W1011 04:08:38.245420 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod09559c43_2ce0_4bcf_a5f5_b30a42d5fbd9.slice/crio-1820131e4e89f460615d99d78100b1de144bc8b3cfd5280b8f9294b3e790c4b4 WatchSource:0}: Error finding container 1820131e4e89f460615d99d78100b1de144bc8b3cfd5280b8f9294b3e790c4b4: Status 404 returned error can't find the container with id 1820131e4e89f460615d99d78100b1de144bc8b3cfd5280b8f9294b3e790c4b4 Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.574927 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p"] Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.579787 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77"] Oct 11 04:08:38 crc kubenswrapper[4798]: W1011 04:08:38.610873 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd7c1b935_ea3d_4d7b_96ab_5eb98fbaaea8.slice/crio-e463c22115880cd3c1ced8288080d81044ffc8ca7c547a8aafcdf1502294a244 WatchSource:0}: Error finding container e463c22115880cd3c1ced8288080d81044ffc8ca7c547a8aafcdf1502294a244: Status 404 returned error can't find the container with id e463c22115880cd3c1ced8288080d81044ffc8ca7c547a8aafcdf1502294a244 Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.610991 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm"] Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.613660 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67"] Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.621013 4798 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6"] Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.631343 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4"] Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.637949 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r"] Oct 11 04:08:38 crc kubenswrapper[4798]: W1011 04:08:38.646663 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7f1693fd_2ec7_4047_a2df_ffa5d7c94e67.slice/crio-dde7c20fbddd85e38a9da9250ff284a1a238cf407c3fcccc9630002dd02d32b9 WatchSource:0}: Error finding container dde7c20fbddd85e38a9da9250ff284a1a238cf407c3fcccc9630002dd02d32b9: Status 404 returned error can't find the container with id dde7c20fbddd85e38a9da9250ff284a1a238cf407c3fcccc9630002dd02d32b9 Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.646772 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb"] Oct 11 04:08:38 crc kubenswrapper[4798]: W1011 04:08:38.649581 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode9fb9720_fc76_47a8_b111_63b5bc2899da.slice/crio-f8713f6d25ae9ffa5259ee31b689768ef480eeb1bccc274ff14877b69fc4aa47 WatchSource:0}: Error finding container f8713f6d25ae9ffa5259ee31b689768ef480eeb1bccc274ff14877b69fc4aa47: Status 404 returned error can't find the container with id f8713f6d25ae9ffa5259ee31b689768ef480eeb1bccc274ff14877b69fc4aa47 Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.654646 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8"] Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.663309 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq"] Oct 11 04:08:38 crc kubenswrapper[4798]: W1011 04:08:38.668653 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48b2c137_9726_4e99_b0ca_a3cdb3bc6a1b.slice/crio-0a231d59158876008ab3482b86365f59bcf2cd96b0cc9c43639a76fb4dacb9bd WatchSource:0}: Error finding container 0a231d59158876008ab3482b86365f59bcf2cd96b0cc9c43639a76fb4dacb9bd: Status 404 returned error can't find the container with id 0a231d59158876008ab3482b86365f59bcf2cd96b0cc9c43639a76fb4dacb9bd Oct 11 04:08:38 crc kubenswrapper[4798]: E1011 04:08:38.684270 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:063a7e65b4ba98f0506f269ff7525b446eae06a5ed4a61c18ffa33a886500867,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m 
DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-k544d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-6d74794d9b-vpg7r_openstack-operators(332f7594-84b2-4761-a067-a32c31469e4f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 11 04:08:38 crc kubenswrapper[4798]: W1011 04:08:38.687492 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01834c42_7ee0_4576_9136_5812fc37e1cc.slice/crio-a0255ba646e18b45cee53ed8f66f007bbe734014657f74c75b045be15d9dfdb8 WatchSource:0}: Error finding container a0255ba646e18b45cee53ed8f66f007bbe734014657f74c75b045be15d9dfdb8: Status 404 returned error can't find the container with id a0255ba646e18b45cee53ed8f66f007bbe734014657f74c75b045be15d9dfdb8 Oct 11 04:08:38 crc kubenswrapper[4798]: E1011 04:08:38.690358 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:5cfb2ae1092445950b39dd59caa9a8c9367f42fb8353a8c3848d3bc729f24492,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rwstx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-585fc5b659-zsdqq_openstack-operators(01834c42-7ee0-4576-9136-5812fc37e1cc): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.895639 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk" event={"ID":"a168d198-c58b-4674-8286-72fb306036b2","Type":"ContainerStarted","Data":"643b7c0fb0fe8493513c5cd437a3795b0826d58078e3a9b62dd603e2cac6ca7e"} Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.896791 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f" event={"ID":"4490c7b3-a500-4224-8597-abd71de4fa13","Type":"ContainerStarted","Data":"1c7d79adbee5b43478c4c9e2557736bb673ade28b55c41734c56412bbda26b7e"} Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.902495 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6" event={"ID":"e9fb9720-fc76-47a8-b111-63b5bc2899da","Type":"ContainerStarted","Data":"f8713f6d25ae9ffa5259ee31b689768ef480eeb1bccc274ff14877b69fc4aa47"} Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.905230 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn" event={"ID":"7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7","Type":"ContainerStarted","Data":"c0234519bf2526046c02e43dd737e9e2642577b8dc929ea9e041d39d703cfb18"} Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.939698 4798 generic.go:334] "Generic (PLEG): container finished" podID="09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" containerID="4a97859d8d96c11f44bcaabbabea627085d0ef182e07df68e5bc1f6acbb78c5b" exitCode=0 Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.939729 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft"] Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.939902 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m7ldp" event={"ID":"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9","Type":"ContainerDied","Data":"4a97859d8d96c11f44bcaabbabea627085d0ef182e07df68e5bc1f6acbb78c5b"} Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.939982 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m7ldp" event={"ID":"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9","Type":"ContainerStarted","Data":"1820131e4e89f460615d99d78100b1de144bc8b3cfd5280b8f9294b3e790c4b4"} Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.946449 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8"] Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.956873 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r" event={"ID":"332f7594-84b2-4761-a067-a32c31469e4f","Type":"ContainerStarted","Data":"b82ddf6a450211bf73221c74e0d9b90c457fee89c134101cb59a2204939ebe51"} Oct 11 04:08:38 crc kubenswrapper[4798]: E1011 04:08:38.982274 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" podUID="01834c42-7ee0-4576-9136-5812fc37e1cc" Oct 11 04:08:38 crc kubenswrapper[4798]: E1011 04:08:38.982971 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r" podUID="332f7594-84b2-4761-a067-a32c31469e4f" Oct 11 04:08:38 crc kubenswrapper[4798]: I1011 04:08:38.994591 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77" event={"ID":"2c18a2f7-9a42-495c-bc9c-86750a381f5c","Type":"ContainerStarted","Data":"ad9e13b22c3acbee095f57e20952548b720617f9bda08824c245f9a9d8149701"} Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.002149 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4"] Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.007141 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-646675d848-68hdd"] Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.010833 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb" event={"ID":"cc1b32c4-acec-47f4-95ce-88763c33ca81","Type":"ContainerStarted","Data":"2a9703266bb71e5fd8455e7bd3a249efb8f221801b51646143d241120ebe8a2c"} Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.012650 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl"] Oct 11 04:08:39 crc kubenswrapper[4798]: E1011 04:08:39.014440 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:a17fc270857869fd1efe5020b2a1cb8c2abbd838f08de88f3a6a59e8754ec351,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volum
e:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack
-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATE
D_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:c
urrent-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s6k6f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4_openstack-operators(60303330-3699-43be-ba28-96cd788e1cf0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.014656 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67" event={"ID":"13f1e30f-4d2b-4517-86ae-8dc8334b0841","Type":"ContainerStarted","Data":"9f40c862ed16e4d12dde8e1fb71a6075d644d790d11288d467ebfb918bb9d9e9"} Oct 11 04:08:39 crc kubenswrapper[4798]: E1011 04:08:39.019229 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:98a5233f0596591acdf2c6a5838b08be108787cdb6ad1995b2b7886bac0fe6ca,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qt44t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-646675d848-68hdd_openstack-operators(eb92bfae-ca3f-4e70-9fa1-440a6154cb53): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 11 04:08:39 crc kubenswrapper[4798]: E1011 04:08:39.019567 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:abe978f8da75223de5043cca50278ad4e28c8dd309883f502fe1e7a9998733b0,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jgjcr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-578874c84d-zg2f8_openstack-operators(0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.022212 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4"] Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.025299 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4" event={"ID":"9b7c0cf6-604f-43aa-ad27-0a2f54507751","Type":"ContainerStarted","Data":"6dba3316a660b304ece2c07c70803fe913e653e7b0f896edb42561b60beeb17b"} Oct 11 04:08:39 crc kubenswrapper[4798]: E1011 04:08:39.030205 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:4b4a17fe08ce00e375afaaec6a28835f5c1784f03d11c4558376ac04130f3a9e,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b7fvl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f4d5dfdc6-fffgl_openstack-operators(f0952c0e-9c58-446c-90eb-61c0f8f4d64a): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.075862 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" event={"ID":"01834c42-7ee0-4576-9136-5812fc37e1cc","Type":"ContainerStarted","Data":"a0255ba646e18b45cee53ed8f66f007bbe734014657f74c75b045be15d9dfdb8"} Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.082029 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p" event={"ID":"d7c1b935-ea3d-4d7b-96ab-5eb98fbaaea8","Type":"ContainerStarted","Data":"e463c22115880cd3c1ced8288080d81044ffc8ca7c547a8aafcdf1502294a244"} Oct 11 04:08:39 crc kubenswrapper[4798]: E1011 04:08:39.094292 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:5cfb2ae1092445950b39dd59caa9a8c9367f42fb8353a8c3848d3bc729f24492\\\"\"" pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" podUID="01834c42-7ee0-4576-9136-5812fc37e1cc" Oct 11 04:08:39 crc kubenswrapper[4798]: E1011 04:08:39.094616 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jbg87,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4_openstack-operators(a368474b-c03b-43b2-96af-0bb78d9f6ee6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 11 04:08:39 crc kubenswrapper[4798]: E1011 04:08:39.096489 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4" podUID="a368474b-c03b-43b2-96af-0bb78d9f6ee6" Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.097851 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt"] Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.113362 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm" event={"ID":"7f1693fd-2ec7-4047-a2df-ffa5d7c94e67","Type":"ContainerStarted","Data":"dde7c20fbddd85e38a9da9250ff284a1a238cf407c3fcccc9630002dd02d32b9"} Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.118625 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8" event={"ID":"2448a4a6-cf4c-45ec-89cc-25621f444988","Type":"ContainerStarted","Data":"68bd6e40be7177b22fe1397e9114003a5537bb31f46988258a002115309d41a0"} Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.120051 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb" event={"ID":"48b2c137-9726-4e99-b0ca-a3cdb3bc6a1b","Type":"ContainerStarted","Data":"0a231d59158876008ab3482b86365f59bcf2cd96b0cc9c43639a76fb4dacb9bd"} Oct 11 04:08:39 crc kubenswrapper[4798]: I1011 04:08:39.127173 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn" event={"ID":"2fe367a2-f6f5-47ea-97a5-75a8b79778fb","Type":"ContainerStarted","Data":"c90e612809a17c515d4e277e16edecb23308eba6378802ba8915baffe0e8de64"} Oct 11 04:08:39 crc kubenswrapper[4798]: E1011 04:08:39.335239 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" podUID="60303330-3699-43be-ba28-96cd788e1cf0" Oct 11 04:08:39 crc kubenswrapper[4798]: E1011 04:08:39.336165 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" 
pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd" podUID="eb92bfae-ca3f-4e70-9fa1-440a6154cb53" Oct 11 04:08:39 crc kubenswrapper[4798]: E1011 04:08:39.352510 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8" podUID="0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94" Oct 11 04:08:39 crc kubenswrapper[4798]: E1011 04:08:39.423670 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl" podUID="f0952c0e-9c58-446c-90eb-61c0f8f4d64a" Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.171244 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl" event={"ID":"f0952c0e-9c58-446c-90eb-61c0f8f4d64a","Type":"ContainerStarted","Data":"a4b41070700cbc2ed349847e207de0548a266ff1dc0cae96ee49ed62ec2abaec"} Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.171300 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl" event={"ID":"f0952c0e-9c58-446c-90eb-61c0f8f4d64a","Type":"ContainerStarted","Data":"8b775798224094fce63309b664ecbcbec05a1a9f761c409b5b982dfa95532642"} Oct 11 04:08:40 crc kubenswrapper[4798]: E1011 04:08:40.177092 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:4b4a17fe08ce00e375afaaec6a28835f5c1784f03d11c4558376ac04130f3a9e\\\"\"" pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl" podUID="f0952c0e-9c58-446c-90eb-61c0f8f4d64a" Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.179906 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m7ldp" event={"ID":"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9","Type":"ContainerStarted","Data":"a04a377cc8f223fb8a450c1787d84b5fdba69deb3645cfccd5974701b3546dd1"} Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.187333 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r" event={"ID":"332f7594-84b2-4761-a067-a32c31469e4f","Type":"ContainerStarted","Data":"6405c2b8447d7d6baea1fdbbc461ae2601938eab52fb6da0df6b8ce2277e20a0"} Oct 11 04:08:40 crc kubenswrapper[4798]: E1011 04:08:40.188978 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:063a7e65b4ba98f0506f269ff7525b446eae06a5ed4a61c18ffa33a886500867\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r" podUID="332f7594-84b2-4761-a067-a32c31469e4f" Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.200824 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" event={"ID":"a0f0b4cf-9851-4d84-81e0-7130b9777793","Type":"ContainerStarted","Data":"85f121e9ae928fc30f8c75ed9821f26a14b52122410bf443fc108b9b9a3cc3e1"} Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.200875 4798 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" event={"ID":"a0f0b4cf-9851-4d84-81e0-7130b9777793","Type":"ContainerStarted","Data":"cbf3ed9f37cd17bbaed4e4cc4d93dd9a60ae6d232b79c8b13f1de7e6c6c1b8fa"} Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.200887 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" event={"ID":"a0f0b4cf-9851-4d84-81e0-7130b9777793","Type":"ContainerStarted","Data":"7aa89c05a5bab669841de0f846245d633f05f960a9c1784ae5e49b818e074495"} Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.201603 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.212462 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd" event={"ID":"eb92bfae-ca3f-4e70-9fa1-440a6154cb53","Type":"ContainerStarted","Data":"c1670de5be21fe92dc4dcab5f9326837a00f162efb7a9a4243158d4ff5252624"} Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.212510 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd" event={"ID":"eb92bfae-ca3f-4e70-9fa1-440a6154cb53","Type":"ContainerStarted","Data":"dbdfee2fad4e14c607f166aa55a23847631e9545bad0a8be48335572c0fa854b"} Oct 11 04:08:40 crc kubenswrapper[4798]: E1011 04:08:40.218681 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:98a5233f0596591acdf2c6a5838b08be108787cdb6ad1995b2b7886bac0fe6ca\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd" podUID="eb92bfae-ca3f-4e70-9fa1-440a6154cb53" Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.223289 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" event={"ID":"01834c42-7ee0-4576-9136-5812fc37e1cc","Type":"ContainerStarted","Data":"26070f62b256d6b76d8305ae80961664e5f34f0a3dc74f4ecca0abe3dbcbdbc4"} Oct 11 04:08:40 crc kubenswrapper[4798]: E1011 04:08:40.228973 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:5cfb2ae1092445950b39dd59caa9a8c9367f42fb8353a8c3848d3bc729f24492\\\"\"" pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" podUID="01834c42-7ee0-4576-9136-5812fc37e1cc" Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.243047 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8" event={"ID":"0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94","Type":"ContainerStarted","Data":"a38377a2fd4f282422108df68495376b9d9eab52382f5a061cb6e78784ec59ec"} Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.243108 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8" event={"ID":"0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94","Type":"ContainerStarted","Data":"858197bdf191fcc321940b21f607351ef86716d97b441a35c12614dc9fb40450"} Oct 11 04:08:40 crc kubenswrapper[4798]: 
E1011 04:08:40.247222 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:abe978f8da75223de5043cca50278ad4e28c8dd309883f502fe1e7a9998733b0\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8" podUID="0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94" Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.259769 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft" event={"ID":"2c332094-6f91-4065-829a-736e42bd6560","Type":"ContainerStarted","Data":"f2695689105044f0e44f8096ef348ddec10fb6f53bec98b526f04803a4a8153f"} Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.277838 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4" event={"ID":"a368474b-c03b-43b2-96af-0bb78d9f6ee6","Type":"ContainerStarted","Data":"b0bd0121d5e684f2a26e303afccd230415296563cc681028078f88f19b5c8bd5"} Oct 11 04:08:40 crc kubenswrapper[4798]: E1011 04:08:40.280197 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4" podUID="a368474b-c03b-43b2-96af-0bb78d9f6ee6" Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.280955 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" event={"ID":"60303330-3699-43be-ba28-96cd788e1cf0","Type":"ContainerStarted","Data":"d21f045bfcc3f71d62170745c0dba7249d1d4176f095d53107c254fc8345f08c"} Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.280997 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" event={"ID":"60303330-3699-43be-ba28-96cd788e1cf0","Type":"ContainerStarted","Data":"8bd2525084c7d3c7e7da801e8b6f43b11b453deff190e067c037aaad15692091"} Oct 11 04:08:40 crc kubenswrapper[4798]: E1011 04:08:40.282503 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:a17fc270857869fd1efe5020b2a1cb8c2abbd838f08de88f3a6a59e8754ec351\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" podUID="60303330-3699-43be-ba28-96cd788e1cf0" Oct 11 04:08:40 crc kubenswrapper[4798]: I1011 04:08:40.327712 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" podStartSLOduration=4.32769278 podStartE2EDuration="4.32769278s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:08:40.326021691 +0000 UTC m=+815.662311377" watchObservedRunningTime="2025-10-11 04:08:40.32769278 +0000 UTC m=+815.663982466" Oct 11 04:08:41 crc kubenswrapper[4798]: I1011 04:08:41.295100 4798 generic.go:334] "Generic (PLEG): container finished" 
podID="09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" containerID="a04a377cc8f223fb8a450c1787d84b5fdba69deb3645cfccd5974701b3546dd1" exitCode=0 Oct 11 04:08:41 crc kubenswrapper[4798]: I1011 04:08:41.295176 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m7ldp" event={"ID":"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9","Type":"ContainerDied","Data":"a04a377cc8f223fb8a450c1787d84b5fdba69deb3645cfccd5974701b3546dd1"} Oct 11 04:08:41 crc kubenswrapper[4798]: E1011 04:08:41.297241 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4" podUID="a368474b-c03b-43b2-96af-0bb78d9f6ee6" Oct 11 04:08:41 crc kubenswrapper[4798]: E1011 04:08:41.297611 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:abe978f8da75223de5043cca50278ad4e28c8dd309883f502fe1e7a9998733b0\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8" podUID="0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94" Oct 11 04:08:41 crc kubenswrapper[4798]: E1011 04:08:41.298003 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:98a5233f0596591acdf2c6a5838b08be108787cdb6ad1995b2b7886bac0fe6ca\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd" podUID="eb92bfae-ca3f-4e70-9fa1-440a6154cb53" Oct 11 04:08:41 crc kubenswrapper[4798]: E1011 04:08:41.298120 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:4b4a17fe08ce00e375afaaec6a28835f5c1784f03d11c4558376ac04130f3a9e\\\"\"" pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl" podUID="f0952c0e-9c58-446c-90eb-61c0f8f4d64a" Oct 11 04:08:41 crc kubenswrapper[4798]: E1011 04:08:41.298221 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:063a7e65b4ba98f0506f269ff7525b446eae06a5ed4a61c18ffa33a886500867\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r" podUID="332f7594-84b2-4761-a067-a32c31469e4f" Oct 11 04:08:41 crc kubenswrapper[4798]: E1011 04:08:41.299348 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:a17fc270857869fd1efe5020b2a1cb8c2abbd838f08de88f3a6a59e8754ec351\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" podUID="60303330-3699-43be-ba28-96cd788e1cf0" Oct 11 04:08:41 crc kubenswrapper[4798]: E1011 04:08:41.305372 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/infra-operator@sha256:5cfb2ae1092445950b39dd59caa9a8c9367f42fb8353a8c3848d3bc729f24492\\\"\"" pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" podUID="01834c42-7ee0-4576-9136-5812fc37e1cc" Oct 11 04:08:47 crc kubenswrapper[4798]: I1011 04:08:47.833935 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7d8c4595d6-d6fzt" Oct 11 04:08:54 crc kubenswrapper[4798]: E1011 04:08:54.337548 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:7e584b1c430441c8b6591dadeff32e065de8a185ad37ef90d2e08d37e59aab4a" Oct 11 04:08:54 crc kubenswrapper[4798]: E1011 04:08:54.338407 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:7e584b1c430441c8b6591dadeff32e065de8a185ad37ef90d2e08d37e59aab4a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7kn4l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-ffcdd6c94-4t4ft_openstack-operators(2c332094-6f91-4065-829a-736e42bd6560): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 11 04:08:55 crc kubenswrapper[4798]: I1011 04:08:55.566069 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b5ff5"] Oct 11 04:08:55 crc kubenswrapper[4798]: I1011 04:08:55.568275 
4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b5ff5" Oct 11 04:08:55 crc kubenswrapper[4798]: I1011 04:08:55.587635 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9821447b-4298-4a37-b842-d157b9b453c6-utilities\") pod \"redhat-marketplace-b5ff5\" (UID: \"9821447b-4298-4a37-b842-d157b9b453c6\") " pod="openshift-marketplace/redhat-marketplace-b5ff5" Oct 11 04:08:55 crc kubenswrapper[4798]: I1011 04:08:55.588014 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsngr\" (UniqueName: \"kubernetes.io/projected/9821447b-4298-4a37-b842-d157b9b453c6-kube-api-access-zsngr\") pod \"redhat-marketplace-b5ff5\" (UID: \"9821447b-4298-4a37-b842-d157b9b453c6\") " pod="openshift-marketplace/redhat-marketplace-b5ff5" Oct 11 04:08:55 crc kubenswrapper[4798]: I1011 04:08:55.588050 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9821447b-4298-4a37-b842-d157b9b453c6-catalog-content\") pod \"redhat-marketplace-b5ff5\" (UID: \"9821447b-4298-4a37-b842-d157b9b453c6\") " pod="openshift-marketplace/redhat-marketplace-b5ff5" Oct 11 04:08:55 crc kubenswrapper[4798]: I1011 04:08:55.595070 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b5ff5"] Oct 11 04:08:55 crc kubenswrapper[4798]: I1011 04:08:55.690118 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsngr\" (UniqueName: \"kubernetes.io/projected/9821447b-4298-4a37-b842-d157b9b453c6-kube-api-access-zsngr\") pod \"redhat-marketplace-b5ff5\" (UID: \"9821447b-4298-4a37-b842-d157b9b453c6\") " pod="openshift-marketplace/redhat-marketplace-b5ff5" Oct 11 04:08:55 crc kubenswrapper[4798]: I1011 04:08:55.690213 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9821447b-4298-4a37-b842-d157b9b453c6-catalog-content\") pod \"redhat-marketplace-b5ff5\" (UID: \"9821447b-4298-4a37-b842-d157b9b453c6\") " pod="openshift-marketplace/redhat-marketplace-b5ff5" Oct 11 04:08:55 crc kubenswrapper[4798]: I1011 04:08:55.690332 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9821447b-4298-4a37-b842-d157b9b453c6-utilities\") pod \"redhat-marketplace-b5ff5\" (UID: \"9821447b-4298-4a37-b842-d157b9b453c6\") " pod="openshift-marketplace/redhat-marketplace-b5ff5" Oct 11 04:08:55 crc kubenswrapper[4798]: I1011 04:08:55.690932 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9821447b-4298-4a37-b842-d157b9b453c6-catalog-content\") pod \"redhat-marketplace-b5ff5\" (UID: \"9821447b-4298-4a37-b842-d157b9b453c6\") " pod="openshift-marketplace/redhat-marketplace-b5ff5" Oct 11 04:08:55 crc kubenswrapper[4798]: I1011 04:08:55.691009 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9821447b-4298-4a37-b842-d157b9b453c6-utilities\") pod \"redhat-marketplace-b5ff5\" (UID: \"9821447b-4298-4a37-b842-d157b9b453c6\") " pod="openshift-marketplace/redhat-marketplace-b5ff5" Oct 11 04:08:55 crc kubenswrapper[4798]: I1011 04:08:55.720486 
4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsngr\" (UniqueName: \"kubernetes.io/projected/9821447b-4298-4a37-b842-d157b9b453c6-kube-api-access-zsngr\") pod \"redhat-marketplace-b5ff5\" (UID: \"9821447b-4298-4a37-b842-d157b9b453c6\") " pod="openshift-marketplace/redhat-marketplace-b5ff5" Oct 11 04:08:55 crc kubenswrapper[4798]: I1011 04:08:55.886876 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b5ff5" Oct 11 04:08:57 crc kubenswrapper[4798]: E1011 04:08:57.414269 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:79b43a69884631c635d2164b95a2d4ec68f5cb33f96da14764f1c710880f3997" Oct 11 04:08:57 crc kubenswrapper[4798]: E1011 04:08:57.414864 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:79b43a69884631c635d2164b95a2d4ec68f5cb33f96da14764f1c710880f3997,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gxqvp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-ddb98f99b-npmgn_openstack-operators(2fe367a2-f6f5-47ea-97a5-75a8b79778fb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 11 04:08:57 crc kubenswrapper[4798]: E1011 04:08:57.892731 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = 
copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:582f7b1e411961b69f2e3c6b346aa25759b89f7720ed3fade1d363bf5d2dffc8" Oct 11 04:08:57 crc kubenswrapper[4798]: E1011 04:08:57.893048 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:582f7b1e411961b69f2e3c6b346aa25759b89f7720ed3fade1d363bf5d2dffc8,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9k2k6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-59578bc799-7kfp8_openstack-operators(2448a4a6-cf4c-45ec-89cc-25621f444988): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 11 04:08:57 crc kubenswrapper[4798]: E1011 04:08:57.978600 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.66:5001/openstack-k8s-operators/glance-operator:07f91ae2b9667cd736ffe7adf6d7baf48ada5140" Oct 11 04:08:57 crc kubenswrapper[4798]: E1011 04:08:57.978712 4798 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.66:5001/openstack-k8s-operators/glance-operator:07f91ae2b9667cd736ffe7adf6d7baf48ada5140" Oct 11 04:08:57 crc kubenswrapper[4798]: E1011 04:08:57.978887 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:38.102.83.66:5001/openstack-k8s-operators/glance-operator:07f91ae2b9667cd736ffe7adf6d7baf48ada5140,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-w24b6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-7f5fc6b5ff-4zk9f_openstack-operators(4490c7b3-a500-4224-8597-abd71de4fa13): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 11 04:08:58 crc kubenswrapper[4798]: E1011 04:08:58.530807 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:ee05f2b06405240a8fcdbd430a9e8983b4667f372548334307b68c154e389960"
Oct 11 04:08:58 crc kubenswrapper[4798]: E1011 04:08:58.530974 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:ee05f2b06405240a8fcdbd430a9e8983b4667f372548334307b68c154e389960,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6sj5f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-74cb5cbc49-p96zk_openstack-operators(a168d198-c58b-4674-8286-72fb306036b2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 11 04:09:01 crc kubenswrapper[4798]: E1011 04:09:01.820091 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft" podUID="2c332094-6f91-4065-829a-736e42bd6560"
Oct 11 04:09:01 crc kubenswrapper[4798]: E1011 04:09:01.956532 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn" podUID="2fe367a2-f6f5-47ea-97a5-75a8b79778fb"
Oct 11 04:09:01 crc kubenswrapper[4798]: E1011 04:09:01.964456 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f" podUID="4490c7b3-a500-4224-8597-abd71de4fa13"
Oct 11 04:09:01 crc kubenswrapper[4798]: I1011 04:09:01.989642 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b5ff5"]
Oct 11 04:09:02 crc kubenswrapper[4798]: E1011 04:09:02.005238 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk" podUID="a168d198-c58b-4674-8286-72fb306036b2"
Oct 11 04:09:02 crc kubenswrapper[4798]: W1011 04:09:02.012985 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9821447b_4298_4a37_b842_d157b9b453c6.slice/crio-cf38d105523e5d9be85f798647f1babee51dbecf803deae628d0da9867131598 WatchSource:0}: Error finding container cf38d105523e5d9be85f798647f1babee51dbecf803deae628d0da9867131598: Status 404 returned error can't find the container with id cf38d105523e5d9be85f798647f1babee51dbecf803deae628d0da9867131598
Oct 11 04:09:02 crc kubenswrapper[4798]: E1011 04:09:02.203912 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8" podUID="2448a4a6-cf4c-45ec-89cc-25621f444988"
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.449252 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6" event={"ID":"e9fb9720-fc76-47a8-b111-63b5bc2899da","Type":"ContainerStarted","Data":"7a75e9a617be3ec989396bd6ea4c1542da1e1d4b1d0c4b8db90296ade8b6c115"}
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.450347 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb" event={"ID":"cc1b32c4-acec-47f4-95ce-88763c33ca81","Type":"ContainerStarted","Data":"e9c9743674f5ab28ea09033247558a1f8c2fa11784ec24fc798c05630cce2688"}
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.454355 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn" event={"ID":"7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7","Type":"ContainerStarted","Data":"2fa43b922db0873f054b97d4ba2412d35be5180d90778d5ec39c82255ffc3a7a"}
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.470576 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8" event={"ID":"2448a4a6-cf4c-45ec-89cc-25621f444988","Type":"ContainerStarted","Data":"cd1d17110275a02f2a096d26d1847061e4930a2b974712c5f73e34d61e2487aa"}
Oct 11 04:09:02 crc kubenswrapper[4798]: E1011 04:09:02.472234 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:582f7b1e411961b69f2e3c6b346aa25759b89f7720ed3fade1d363bf5d2dffc8\\\"\"" pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8" podUID="2448a4a6-cf4c-45ec-89cc-25621f444988"
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.472878 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5ff5" event={"ID":"9821447b-4298-4a37-b842-d157b9b453c6","Type":"ContainerStarted","Data":"cf38d105523e5d9be85f798647f1babee51dbecf803deae628d0da9867131598"}
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.483605 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m7ldp" event={"ID":"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9","Type":"ContainerStarted","Data":"6c003327b012b3473480336fca398d96e16190112c3aa5b9fedb9d84eb3047eb"}
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.489823 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f" event={"ID":"4490c7b3-a500-4224-8597-abd71de4fa13","Type":"ContainerStarted","Data":"3c963f23819968fc31c838e342217ada1c31f33f11447362499b070970731ee4"}
Oct 11 04:09:02 crc kubenswrapper[4798]: E1011 04:09:02.492083 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.66:5001/openstack-k8s-operators/glance-operator:07f91ae2b9667cd736ffe7adf6d7baf48ada5140\\\"\"" pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f" podUID="4490c7b3-a500-4224-8597-abd71de4fa13"
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.493264 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm" event={"ID":"7f1693fd-2ec7-4047-a2df-ffa5d7c94e67","Type":"ContainerStarted","Data":"1d45f210f7c40e0cc14172b993910f546a32ac633cf067e511ebc5bc8e9d5852"}
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.506983 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4" event={"ID":"9b7c0cf6-604f-43aa-ad27-0a2f54507751","Type":"ContainerStarted","Data":"06489f6be85b648e3f0e278735910789f3e1ee9192f062089bf0cf459eff34e9"}
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.516026 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn" event={"ID":"2fe367a2-f6f5-47ea-97a5-75a8b79778fb","Type":"ContainerStarted","Data":"226a475fd7b2461e7e9a3a62482be81f86861e59e6a133f79d43889047ed48dc"}
Oct 11 04:09:02 crc kubenswrapper[4798]: E1011 04:09:02.520114 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:79b43a69884631c635d2164b95a2d4ec68f5cb33f96da14764f1c710880f3997\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn" podUID="2fe367a2-f6f5-47ea-97a5-75a8b79778fb"
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.531077 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl" event={"ID":"f0952c0e-9c58-446c-90eb-61c0f8f4d64a","Type":"ContainerStarted","Data":"9a721c1e61eeb720bf31c0820f686420ffad33860a924eda57bbaee8e6015754"}
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.531627 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl"
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.542264 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft" event={"ID":"2c332094-6f91-4065-829a-736e42bd6560","Type":"ContainerStarted","Data":"9902db51fd815bb84b8b2a5d763d4bce19ce3329b6437d477d74ca9ec4ccd328"}
Oct 11 04:09:02 crc kubenswrapper[4798]: E1011 04:09:02.543172 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:7e584b1c430441c8b6591dadeff32e065de8a185ad37ef90d2e08d37e59aab4a\\\"\"" pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft" podUID="2c332094-6f91-4065-829a-736e42bd6560"
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.555617 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r" event={"ID":"332f7594-84b2-4761-a067-a32c31469e4f","Type":"ContainerStarted","Data":"e94af28e69aa80183a13a9800259a6817b17396d55c589f583306180fc94e374"}
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.556499 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r"
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.571064 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77" event={"ID":"2c18a2f7-9a42-495c-bc9c-86750a381f5c","Type":"ContainerStarted","Data":"f0ec555708c86f9d48f0c252080f0ec1a496f96217851cd2ab0e7d2a967ca4a4"}
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.602822 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-m7ldp" podStartSLOduration=5.40746366 podStartE2EDuration="26.602797213s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.942356026 +0000 UTC m=+814.278645712" lastFinishedPulling="2025-10-11 04:09:00.137689579 +0000 UTC m=+835.473979265" observedRunningTime="2025-10-11 04:09:02.59705374 +0000 UTC m=+837.933343426" watchObservedRunningTime="2025-10-11 04:09:02.602797213 +0000 UTC m=+837.939086909"
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.619303 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk" event={"ID":"a168d198-c58b-4674-8286-72fb306036b2","Type":"ContainerStarted","Data":"23aee9da3dd2a4aa9308b457495de4c392612330536db964c06e6f217eacbf45"}
Oct 11 04:09:02 crc kubenswrapper[4798]: E1011 04:09:02.633172 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:ee05f2b06405240a8fcdbd430a9e8983b4667f372548334307b68c154e389960\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk" podUID="a168d198-c58b-4674-8286-72fb306036b2"
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.647198 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r" podStartSLOduration=4.709233383 podStartE2EDuration="27.647180918s" podCreationTimestamp="2025-10-11 04:08:35 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.684133837 +0000 UTC m=+814.020423523" lastFinishedPulling="2025-10-11 04:09:01.622081372 +0000 UTC m=+836.958371058" observedRunningTime="2025-10-11 04:09:02.633500459 +0000 UTC m=+837.969790145" watchObservedRunningTime="2025-10-11 04:09:02.647180918 +0000 UTC m=+837.983470594"
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.684273 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl" podStartSLOduration=4.162052748 podStartE2EDuration="26.684253342s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:39.029775724 +0000 UTC m=+814.366065420" lastFinishedPulling="2025-10-11 04:09:01.551976328 +0000 UTC m=+836.888266014" observedRunningTime="2025-10-11 04:09:02.681224792 +0000 UTC m=+838.017514478" watchObservedRunningTime="2025-10-11 04:09:02.684253342 +0000 UTC m=+838.020543028"
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.699053 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb" event={"ID":"48b2c137-9726-4e99-b0ca-a3cdb3bc6a1b","Type":"ContainerStarted","Data":"64c91879bc2b383e2d37d313386d440ffccf8a0a47fd6bc4d69de86c6b9aaac4"}
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.712301 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p" event={"ID":"d7c1b935-ea3d-4d7b-96ab-5eb98fbaaea8","Type":"ContainerStarted","Data":"2520877c33f4866fb7dd04ddd04cd9be4a6b6ae747320ddb9b1da5e1e6e4c897"}
Oct 11 04:09:02 crc kubenswrapper[4798]: I1011 04:09:02.729673 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk" event={"ID":"755c871f-8030-429f-9b9f-5b1a6c1e24e0","Type":"ContainerStarted","Data":"55179e5682c92f697c9872091523184538acf26c7287f2a9f8f775723367b19d"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.760336 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4" event={"ID":"a368474b-c03b-43b2-96af-0bb78d9f6ee6","Type":"ContainerStarted","Data":"a820a1391e01ffb6183c6544058a499412d11340e8143ea30e865435e0b66376"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.765736 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb" event={"ID":"cc1b32c4-acec-47f4-95ce-88763c33ca81","Type":"ContainerStarted","Data":"5fb1d525b465ed8b3cbdda47c65288ea8c2e330b9a77de42f9e18c842464b9f7"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.765934 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.769083 4798 generic.go:334] "Generic (PLEG): container finished" podID="9821447b-4298-4a37-b842-d157b9b453c6" containerID="70c790cecdd1be2cf05db3510c8af04a01afe23ff43c7ef0f199e78c9ce0a00e" exitCode=0
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.769183 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5ff5" event={"ID":"9821447b-4298-4a37-b842-d157b9b453c6","Type":"ContainerDied","Data":"70c790cecdd1be2cf05db3510c8af04a01afe23ff43c7ef0f199e78c9ce0a00e"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.772538 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p" event={"ID":"d7c1b935-ea3d-4d7b-96ab-5eb98fbaaea8","Type":"ContainerStarted","Data":"36597d9b45ede9f911dd09ab56f4bc47f031b91552caa91451b85c43d0095688"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.772668 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.774487 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" event={"ID":"60303330-3699-43be-ba28-96cd788e1cf0","Type":"ContainerStarted","Data":"82ceeade9679b5f84f0462da741a47c6d13d14dbcc8c93922f3810ff7e94b0d7"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.775343 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.777707 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4" event={"ID":"9b7c0cf6-604f-43aa-ad27-0a2f54507751","Type":"ContainerStarted","Data":"c51eef4ccab9f457251fb32bbfaee3e52ec1d5d906e0a5ae07663f595e264405"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.778449 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.779973 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn" event={"ID":"7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7","Type":"ContainerStarted","Data":"de807339dd0a8c926f7926593dea7aa4576c42de4c47bce3feac6902a2ef8f6e"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.780360 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.783153 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb" event={"ID":"48b2c137-9726-4e99-b0ca-a3cdb3bc6a1b","Type":"ContainerStarted","Data":"f287e4fc64bf4e77fc78db855a297e2597eeef965379b801653e19156d8d5127"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.783287 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.785114 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77" event={"ID":"2c18a2f7-9a42-495c-bc9c-86750a381f5c","Type":"ContainerStarted","Data":"18227c64d7c2ebc62304f4db561afbec615ba767a0282364a22c6d8b90eb1a23"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.785274 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.786977 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk" event={"ID":"755c871f-8030-429f-9b9f-5b1a6c1e24e0","Type":"ContainerStarted","Data":"660f25461adcfdfa213404cfa9068817b71272bee786292c32f6354247ba555a"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.787103 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.788427 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4" podStartSLOduration=5.255072868 podStartE2EDuration="27.788382411s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:39.094415701 +0000 UTC m=+814.430705387" lastFinishedPulling="2025-10-11 04:09:01.627725244 +0000 UTC m=+836.964014930" observedRunningTime="2025-10-11 04:09:03.781297226 +0000 UTC m=+839.117586912" watchObservedRunningTime="2025-10-11 04:09:03.788382411 +0000 UTC m=+839.124672097"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.788846 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67" event={"ID":"13f1e30f-4d2b-4517-86ae-8dc8334b0841","Type":"ContainerStarted","Data":"275b4081b43799cd5deb2640c3c48107e2b5944404ecd492ebab3ab15410db27"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.788898 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67" event={"ID":"13f1e30f-4d2b-4517-86ae-8dc8334b0841","Type":"ContainerStarted","Data":"1c5213a597f2f0772bbeb512f3e5171d10a99cf91dee83b2e189e3fe10345a95"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.788921 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.790717 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm" event={"ID":"7f1693fd-2ec7-4047-a2df-ffa5d7c94e67","Type":"ContainerStarted","Data":"f4da9921c914078b1e221b99fa3a27a4d5e9e8f086a9a78f885dfacb75102398"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.790846 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.792585 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8" event={"ID":"0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94","Type":"ContainerStarted","Data":"f3bbf180a4b88d6362c55675f214952db3961144e7d16a3a7591582466c694d8"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.792785 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.794870 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd" event={"ID":"eb92bfae-ca3f-4e70-9fa1-440a6154cb53","Type":"ContainerStarted","Data":"f6ac7ba5808789394b27e25577ef2670d2dfafafd8ad65d5c14a447e6799e482"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.795036 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.796911 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6" event={"ID":"e9fb9720-fc76-47a8-b111-63b5bc2899da","Type":"ContainerStarted","Data":"9cda033f647a5b68ed9ff3b25020079b10664dca9f4a1b69539174850aaed2f7"}
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.797315 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.801366 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" event={"ID":"01834c42-7ee0-4576-9136-5812fc37e1cc","Type":"ContainerStarted","Data":"3f86f0e37d6df98072fe10da7d837175600387401779ae7da1076ff13e2a38d3"}
Oct 11 04:09:03 crc kubenswrapper[4798]: E1011 04:09:03.803262 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:79b43a69884631c635d2164b95a2d4ec68f5cb33f96da14764f1c710880f3997\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn" podUID="2fe367a2-f6f5-47ea-97a5-75a8b79778fb"
Oct 11 04:09:03 crc kubenswrapper[4798]: E1011 04:09:03.803287 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.66:5001/openstack-k8s-operators/glance-operator:07f91ae2b9667cd736ffe7adf6d7baf48ada5140\\\"\"" pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f" podUID="4490c7b3-a500-4224-8597-abd71de4fa13"
Oct 11 04:09:03 crc kubenswrapper[4798]: E1011 04:09:03.803378 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:7e584b1c430441c8b6591dadeff32e065de8a185ad37ef90d2e08d37e59aab4a\\\"\"" pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft" podUID="2c332094-6f91-4065-829a-736e42bd6560"
Oct 11 04:09:03 crc kubenswrapper[4798]: E1011 04:09:03.803519 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:ee05f2b06405240a8fcdbd430a9e8983b4667f372548334307b68c154e389960\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk" podUID="a168d198-c58b-4674-8286-72fb306036b2"
Oct 11 04:09:03 crc kubenswrapper[4798]: E1011 04:09:03.803669 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:582f7b1e411961b69f2e3c6b346aa25759b89f7720ed3fade1d363bf5d2dffc8\\\"\"" pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8" podUID="2448a4a6-cf4c-45ec-89cc-25621f444988"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.861709 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn" podStartSLOduration=7.049311403 podStartE2EDuration="28.861687989s" podCreationTimestamp="2025-10-11 04:08:35 +0000 UTC" firstStartedPulling="2025-10-11 04:08:37.926294781 +0000 UTC m=+813.262584467" lastFinishedPulling="2025-10-11 04:08:59.738671367 +0000 UTC m=+835.074961053" observedRunningTime="2025-10-11 04:09:03.859747495 +0000 UTC m=+839.196037181" watchObservedRunningTime="2025-10-11 04:09:03.861687989 +0000 UTC m=+839.197977675"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.878493 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4" podStartSLOduration=6.797013623 podStartE2EDuration="27.878474061s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.65725931 +0000 UTC m=+813.993548996" lastFinishedPulling="2025-10-11 04:08:59.738719748 +0000 UTC m=+835.075009434" observedRunningTime="2025-10-11 04:09:03.876212869 +0000 UTC m=+839.212502555" watchObservedRunningTime="2025-10-11 04:09:03.878474061 +0000 UTC m=+839.214763747"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.901910 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4" podStartSLOduration=5.287593856 podStartE2EDuration="27.901888087s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:39.013875083 +0000 UTC m=+814.350164769" lastFinishedPulling="2025-10-11 04:09:01.628169314 +0000 UTC m=+836.964459000" observedRunningTime="2025-10-11 04:09:03.896735646 +0000 UTC m=+839.233025332" watchObservedRunningTime="2025-10-11 04:09:03.901888087 +0000 UTC m=+839.238177773"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.924853 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb" podStartSLOduration=7.436823045 podStartE2EDuration="28.924834961s" podCreationTimestamp="2025-10-11 04:08:35 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.25060001 +0000 UTC m=+813.586889696" lastFinishedPulling="2025-10-11 04:08:59.738611886 +0000 UTC m=+835.074901612" observedRunningTime="2025-10-11 04:09:03.922363693 +0000 UTC m=+839.258653369" watchObservedRunningTime="2025-10-11 04:09:03.924834961 +0000 UTC m=+839.261124647"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.943850 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p" podStartSLOduration=6.828827645 podStartE2EDuration="27.943830855s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.623651287 +0000 UTC m=+813.959940973" lastFinishedPulling="2025-10-11 04:08:59.738654487 +0000 UTC m=+835.074944183" observedRunningTime="2025-10-11 04:09:03.940383004 +0000 UTC m=+839.276672690" watchObservedRunningTime="2025-10-11 04:09:03.943830855 +0000 UTC m=+839.280120541"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.968355 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67" podStartSLOduration=7.66003023 podStartE2EDuration="27.968319015s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.677020781 +0000 UTC m=+814.013310467" lastFinishedPulling="2025-10-11 04:08:58.985309566 +0000 UTC m=+834.321599252" observedRunningTime="2025-10-11 04:09:03.960221717 +0000 UTC m=+839.296511413" watchObservedRunningTime="2025-10-11 04:09:03.968319015 +0000 UTC m=+839.304608701"
Oct 11 04:09:03 crc kubenswrapper[4798]: I1011 04:09:03.981098 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq" podStartSLOduration=5.120703694 podStartE2EDuration="27.981074862s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.690205668 +0000 UTC m=+814.026495354" lastFinishedPulling="2025-10-11 04:09:01.550576836 +0000 UTC m=+836.886866522" observedRunningTime="2025-10-11 04:09:03.974585822 +0000 UTC m=+839.310875508" watchObservedRunningTime="2025-10-11 04:09:03.981074862 +0000 UTC m=+839.317364538"
Oct 11 04:09:04 crc kubenswrapper[4798]: I1011 04:09:04.011881 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77" podStartSLOduration=9.087015973 podStartE2EDuration="29.0118592s" podCreationTimestamp="2025-10-11 04:08:35 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.630838334 +0000 UTC m=+813.967128020" lastFinishedPulling="2025-10-11 04:08:58.555681561 +0000 UTC m=+833.891971247" observedRunningTime="2025-10-11 04:09:04.006460835 +0000 UTC m=+839.342750521" watchObservedRunningTime="2025-10-11 04:09:04.0118592 +0000 UTC m=+839.348148886"
Oct 11 04:09:04 crc kubenswrapper[4798]: I1011 04:09:04.032011 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk" podStartSLOduration=6.936248657 podStartE2EDuration="29.031974779s" podCreationTimestamp="2025-10-11 04:08:35 +0000 UTC" firstStartedPulling="2025-10-11 04:08:37.642887584 +0000 UTC m=+812.979177270" lastFinishedPulling="2025-10-11 04:08:59.738613706 +0000 UTC m=+835.074903392" observedRunningTime="2025-10-11 04:09:04.025784074 +0000 UTC m=+839.362073760" watchObservedRunningTime="2025-10-11 04:09:04.031974779 +0000 UTC m=+839.368264475"
Oct 11 04:09:04 crc kubenswrapper[4798]: I1011 04:09:04.100880 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm" podStartSLOduration=7.767312 podStartE2EDuration="28.100852095s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.651983057 +0000 UTC m=+813.988272753" lastFinishedPulling="2025-10-11 04:08:58.985523162 +0000 UTC m=+834.321812848" observedRunningTime="2025-10-11 04:09:04.094014785 +0000 UTC m=+839.430304471" watchObservedRunningTime="2025-10-11 04:09:04.100852095 +0000 UTC m=+839.437141781"
Oct 11 04:09:04 crc kubenswrapper[4798]: I1011 04:09:04.129556 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6" podStartSLOduration=7.04753897 podStartE2EDuration="28.129532093s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.656777678 +0000 UTC m=+813.993067374" lastFinishedPulling="2025-10-11 04:08:59.738770811 +0000 UTC m=+835.075060497" observedRunningTime="2025-10-11 04:09:04.124581008 +0000 UTC m=+839.460870694" watchObservedRunningTime="2025-10-11 04:09:04.129532093 +0000 UTC m=+839.465821779"
Oct 11 04:09:04 crc kubenswrapper[4798]: I1011 04:09:04.156317 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb" podStartSLOduration=7.09160652 podStartE2EDuration="28.156284127s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.674010651 +0000 UTC m=+814.010300337" lastFinishedPulling="2025-10-11 04:08:59.738688218 +0000 UTC m=+835.074977944" observedRunningTime="2025-10-11 04:09:04.153299257 +0000 UTC m=+839.489588933" watchObservedRunningTime="2025-10-11 04:09:04.156284127 +0000 UTC m=+839.492573813"
Oct 11 04:09:04 crc kubenswrapper[4798]: I1011 04:09:04.177768 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd" podStartSLOduration=5.522656775 podStartE2EDuration="28.177751827s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:39.019101025 +0000 UTC m=+814.355390711" lastFinishedPulling="2025-10-11 04:09:01.674196077 +0000 UTC m=+837.010485763" observedRunningTime="2025-10-11 04:09:04.175007283 +0000 UTC m=+839.511296969" watchObservedRunningTime="2025-10-11 04:09:04.177751827 +0000 UTC m=+839.514041503"
Oct 11 04:09:04 crc kubenswrapper[4798]: I1011 04:09:04.809326 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5ff5" event={"ID":"9821447b-4298-4a37-b842-d157b9b453c6","Type":"ContainerStarted","Data":"f4718eb50346b78255b012043bb785799b7fa48ff62a0be8ddecb00b2cbaace7"}
Oct 11 04:09:04 crc kubenswrapper[4798]: I1011 04:09:04.838596 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8" podStartSLOduration=6.232106924 podStartE2EDuration="28.838573822s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:39.019239948 +0000 UTC m=+814.355529634" lastFinishedPulling="2025-10-11 04:09:01.625706836 +0000 UTC m=+836.961996532" observedRunningTime="2025-10-11 04:09:04.20958211 +0000 UTC m=+839.545871796" watchObservedRunningTime="2025-10-11 04:09:04.838573822 +0000 UTC m=+840.174863518"
Oct 11 04:09:05 crc kubenswrapper[4798]: I1011 04:09:05.819630 4798 generic.go:334] "Generic (PLEG): container finished" podID="9821447b-4298-4a37-b842-d157b9b453c6" containerID="f4718eb50346b78255b012043bb785799b7fa48ff62a0be8ddecb00b2cbaace7" exitCode=0
Oct 11 04:09:05 crc kubenswrapper[4798]: I1011 04:09:05.819721 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5ff5" event={"ID":"9821447b-4298-4a37-b842-d157b9b453c6","Type":"ContainerDied","Data":"f4718eb50346b78255b012043bb785799b7fa48ff62a0be8ddecb00b2cbaace7"}
Oct 11 04:09:06 crc kubenswrapper[4798]: I1011 04:09:06.715966 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq"
Oct 11 04:09:06 crc kubenswrapper[4798]: I1011 04:09:06.829878 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5ff5" event={"ID":"9821447b-4298-4a37-b842-d157b9b453c6","Type":"ContainerStarted","Data":"571d60362d49700332c04abdb5d7bca87561e35f0a3241a0053a753f78a91972"}
Oct 11 04:09:06 crc kubenswrapper[4798]: I1011 04:09:06.836797 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-m7ldp"
Oct 11 04:09:06 crc kubenswrapper[4798]: I1011 04:09:06.836845 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-m7ldp"
Oct 11 04:09:06 crc kubenswrapper[4798]: I1011 04:09:06.853321 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b5ff5" podStartSLOduration=9.376442299 podStartE2EDuration="11.853300027s" podCreationTimestamp="2025-10-11 04:08:55 +0000 UTC" firstStartedPulling="2025-10-11 04:09:03.770995455 +0000 UTC m=+839.107285141" lastFinishedPulling="2025-10-11 04:09:06.247853183 +0000 UTC m=+841.584142869" observedRunningTime="2025-10-11 04:09:06.845576067 +0000 UTC m=+842.181865753" watchObservedRunningTime="2025-10-11 04:09:06.853300027 +0000 UTC m=+842.189589713"
Oct 11 04:09:07 crc kubenswrapper[4798]: I1011 04:09:07.115709 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-6d7c7ddf95-l9z67"
Oct 11 04:09:07 crc kubenswrapper[4798]: I1011 04:09:07.131826 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-869cc7797f-t29x4"
Oct 11 04:09:07 crc kubenswrapper[4798]: I1011 04:09:07.344605 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-664664cb68-jkj5p"
Oct 11 04:09:07 crc kubenswrapper[4798]: I1011 04:09:07.357962 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-5f4d5dfdc6-fffgl"
Oct 11 04:09:07 crc kubenswrapper[4798]: I1011 04:09:07.382405 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-578874c84d-zg2f8"
Oct 11 04:09:07 crc kubenswrapper[4798]: I1011 04:09:07.438241 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-646675d848-68hdd"
Oct 11 04:09:07 crc kubenswrapper[4798]: I1011 04:09:07.584296 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4"
Oct 11 04:09:07 crc kubenswrapper[4798]: I1011 04:09:07.883495 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-m7ldp" podUID="09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" containerName="registry-server" probeResult="failure" output=<
Oct 11 04:09:07 crc kubenswrapper[4798]: timeout: failed to connect service ":50051" within 1s
Oct 11 04:09:07 crc kubenswrapper[4798]: >
Oct 11 04:09:15 crc kubenswrapper[4798]: I1011 04:09:15.887107 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b5ff5"
Oct 11 04:09:15 crc kubenswrapper[4798]: I1011 04:09:15.887629 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-b5ff5"
Oct 11 04:09:15 crc kubenswrapper[4798]: I1011 04:09:15.934618 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b5ff5"
Oct 11 04:09:15 crc kubenswrapper[4798]: I1011 04:09:15.996627 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b5ff5"
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.182855 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b5ff5"]
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.278355 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-64f84fcdbb-pvqqn"
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.299826 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-59cdc64769-9cgvk"
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.487230 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-6d9967f8dd-lgszb"
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.618061 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-687df44cdb-x5n77"
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.661424 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-6d74794d9b-vpg7r"
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.721205 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-585fc5b659-zsdqq"
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.798254 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-5777b4f897-l86k6"
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.893937 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-m7ldp"
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.906065 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-797d478b46-rwhzb"
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.917843 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-57bb74c7bf-6rfxm"
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.929349 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk" event={"ID":"a168d198-c58b-4674-8286-72fb306036b2","Type":"ContainerStarted","Data":"16f03bc303d91d7bf3d9bef5e1a7ad1d9a7d56da553ca716c900a1734b412004"}
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.929987 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk"
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.963094 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk" podStartSLOduration=2.455412169 podStartE2EDuration="40.963076508s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.244795455 +0000 UTC m=+813.581085141" lastFinishedPulling="2025-10-11 04:09:16.752459794 +0000 UTC m=+852.088749480" observedRunningTime="2025-10-11 04:09:16.959545274 +0000 UTC m=+852.295834950" watchObservedRunningTime="2025-10-11 04:09:16.963076508 +0000 UTC m=+852.299366194"
Oct 11 04:09:16 crc kubenswrapper[4798]: I1011 04:09:16.968794 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-m7ldp"
Oct 11 04:09:17 crc kubenswrapper[4798]: I1011 04:09:17.952264 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f" event={"ID":"4490c7b3-a500-4224-8597-abd71de4fa13","Type":"ContainerStarted","Data":"7a062a7f389df52bac3ca9e5d333d0c7945eba62bc1f26b5bf165a97a60de146"}
Oct 11 04:09:17 crc kubenswrapper[4798]: I1011 04:09:17.952722 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-b5ff5" podUID="9821447b-4298-4a37-b842-d157b9b453c6" containerName="registry-server" containerID="cri-o://571d60362d49700332c04abdb5d7bca87561e35f0a3241a0053a753f78a91972" gracePeriod=2
Oct 11 04:09:17 crc kubenswrapper[4798]: I1011 04:09:17.952851 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f"
Oct 11 04:09:17 crc kubenswrapper[4798]: I1011 04:09:17.981106 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f" podStartSLOduration=4.136881963 podStartE2EDuration="42.981077373s" podCreationTimestamp="2025-10-11 04:08:35 +0000 UTC" firstStartedPulling="2025-10-11 04:08:37.970296556 +0000 UTC m=+813.306586242" lastFinishedPulling="2025-10-11 04:09:16.814491966 +0000 UTC m=+852.150781652" observedRunningTime="2025-10-11 04:09:17.975781337 +0000 UTC m=+853.312071063" watchObservedRunningTime="2025-10-11 04:09:17.981077373 +0000 UTC m=+853.317367099"
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.514747 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b5ff5"
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.570472 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9821447b-4298-4a37-b842-d157b9b453c6-catalog-content\") pod \"9821447b-4298-4a37-b842-d157b9b453c6\" (UID: \"9821447b-4298-4a37-b842-d157b9b453c6\") "
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.570866 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsngr\" (UniqueName: \"kubernetes.io/projected/9821447b-4298-4a37-b842-d157b9b453c6-kube-api-access-zsngr\") pod \"9821447b-4298-4a37-b842-d157b9b453c6\" (UID: \"9821447b-4298-4a37-b842-d157b9b453c6\") "
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.571059 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9821447b-4298-4a37-b842-d157b9b453c6-utilities\") pod \"9821447b-4298-4a37-b842-d157b9b453c6\" (UID: \"9821447b-4298-4a37-b842-d157b9b453c6\") "
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.571720 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9821447b-4298-4a37-b842-d157b9b453c6-utilities" (OuterVolumeSpecName: "utilities") pod "9821447b-4298-4a37-b842-d157b9b453c6" (UID: "9821447b-4298-4a37-b842-d157b9b453c6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.576663 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9821447b-4298-4a37-b842-d157b9b453c6-kube-api-access-zsngr" (OuterVolumeSpecName: "kube-api-access-zsngr") pod "9821447b-4298-4a37-b842-d157b9b453c6" (UID: "9821447b-4298-4a37-b842-d157b9b453c6"). InnerVolumeSpecName "kube-api-access-zsngr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.584148 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9821447b-4298-4a37-b842-d157b9b453c6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9821447b-4298-4a37-b842-d157b9b453c6" (UID: "9821447b-4298-4a37-b842-d157b9b453c6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.592436 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-m7ldp"]
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.592707 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-m7ldp" podUID="09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" containerName="registry-server" containerID="cri-o://6c003327b012b3473480336fca398d96e16190112c3aa5b9fedb9d84eb3047eb" gracePeriod=2
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.673115 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9821447b-4298-4a37-b842-d157b9b453c6-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.673464 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zsngr\" (UniqueName: \"kubernetes.io/projected/9821447b-4298-4a37-b842-d157b9b453c6-kube-api-access-zsngr\") on node \"crc\" DevicePath \"\""
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.673480 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9821447b-4298-4a37-b842-d157b9b453c6-utilities\") on node \"crc\" DevicePath \"\""
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.958342 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8" event={"ID":"2448a4a6-cf4c-45ec-89cc-25621f444988","Type":"ContainerStarted","Data":"b78fb6e53c29abfee98ff1c777ed6d8e5a7d8b1ff9c357a48be4dde373f80a39"}
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.959295 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8"
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.962445 4798 generic.go:334] "Generic (PLEG): container finished" podID="9821447b-4298-4a37-b842-d157b9b453c6" containerID="571d60362d49700332c04abdb5d7bca87561e35f0a3241a0053a753f78a91972" exitCode=0
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.962498 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b5ff5"
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.962543 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5ff5" event={"ID":"9821447b-4298-4a37-b842-d157b9b453c6","Type":"ContainerDied","Data":"571d60362d49700332c04abdb5d7bca87561e35f0a3241a0053a753f78a91972"}
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.962594 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5ff5" event={"ID":"9821447b-4298-4a37-b842-d157b9b453c6","Type":"ContainerDied","Data":"cf38d105523e5d9be85f798647f1babee51dbecf803deae628d0da9867131598"}
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.962621 4798 scope.go:117] "RemoveContainer" containerID="571d60362d49700332c04abdb5d7bca87561e35f0a3241a0053a753f78a91972"
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.964810 4798 generic.go:334] "Generic (PLEG): container finished" podID="09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" containerID="6c003327b012b3473480336fca398d96e16190112c3aa5b9fedb9d84eb3047eb" exitCode=0
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.964866 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m7ldp" event={"ID":"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9","Type":"ContainerDied","Data":"6c003327b012b3473480336fca398d96e16190112c3aa5b9fedb9d84eb3047eb"}
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.968479 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft" event={"ID":"2c332094-6f91-4065-829a-736e42bd6560","Type":"ContainerStarted","Data":"462dba82f2b7d0000cdd5bc6735a1ddd124b7a0821297968845629e3e8e9dfcf"}
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.969442 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft"
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.971143 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn" event={"ID":"2fe367a2-f6f5-47ea-97a5-75a8b79778fb","Type":"ContainerStarted","Data":"642304242d0f1fcf684debc5ed723d2302f66dd7e82b3664f2a49aeaff2549c2"}
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.971541 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn"
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.979041 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8" podStartSLOduration=3.6699699040000002 podStartE2EDuration="42.979025458s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.679968599 +0000 UTC m=+814.016258285" lastFinishedPulling="2025-10-11 04:09:17.989024153 +0000 UTC m=+853.325313839" observedRunningTime="2025-10-11 04:09:18.974197063 +0000 UTC m=+854.310486759" watchObservedRunningTime="2025-10-11 04:09:18.979025458 +0000 UTC m=+854.315315144"
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.988724 4798 scope.go:117] "RemoveContainer" containerID="f4718eb50346b78255b012043bb785799b7fa48ff62a0be8ddecb00b2cbaace7"
Oct 11 04:09:18 crc kubenswrapper[4798]: I1011 04:09:18.994764 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft" podStartSLOduration=4.016821659 podStartE2EDuration="42.994743054s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:39.012973832 +0000 UTC m=+814.349263518" lastFinishedPulling="2025-10-11 04:09:17.990895227 +0000 UTC m=+853.327184913" observedRunningTime="2025-10-11 04:09:18.993964706 +0000 UTC m=+854.330254392" watchObservedRunningTime="2025-10-11 04:09:18.994743054 +0000 UTC m=+854.331032740"
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.011163 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn" podStartSLOduration=3.291971597 podStartE2EDuration="43.011140016s" podCreationTimestamp="2025-10-11 04:08:36 +0000 UTC" firstStartedPulling="2025-10-11 04:08:38.244885517 +0000 UTC m=+813.581175203" lastFinishedPulling="2025-10-11 04:09:17.964053936 +0000 UTC m=+853.300343622" observedRunningTime="2025-10-11 04:09:19.007364835 +0000 UTC m=+854.343654511" watchObservedRunningTime="2025-10-11 04:09:19.011140016 +0000 UTC m=+854.347429692"
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.020985 4798 scope.go:117] "RemoveContainer" containerID="70c790cecdd1be2cf05db3510c8af04a01afe23ff43c7ef0f199e78c9ce0a00e"
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.034207 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b5ff5"]
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.039456 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-b5ff5"]
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.066168 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m7ldp"
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.075362 4798 scope.go:117] "RemoveContainer" containerID="571d60362d49700332c04abdb5d7bca87561e35f0a3241a0053a753f78a91972"
Oct 11 04:09:19 crc kubenswrapper[4798]: E1011 04:09:19.075872 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"571d60362d49700332c04abdb5d7bca87561e35f0a3241a0053a753f78a91972\": container with ID starting with 571d60362d49700332c04abdb5d7bca87561e35f0a3241a0053a753f78a91972 not found: ID does not exist" containerID="571d60362d49700332c04abdb5d7bca87561e35f0a3241a0053a753f78a91972"
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.075924 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"571d60362d49700332c04abdb5d7bca87561e35f0a3241a0053a753f78a91972"} err="failed to get container status \"571d60362d49700332c04abdb5d7bca87561e35f0a3241a0053a753f78a91972\": rpc error: code = NotFound desc = could not find container \"571d60362d49700332c04abdb5d7bca87561e35f0a3241a0053a753f78a91972\": container with ID starting with 571d60362d49700332c04abdb5d7bca87561e35f0a3241a0053a753f78a91972 not found: ID does not exist"
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.075953 4798 scope.go:117] "RemoveContainer" containerID="f4718eb50346b78255b012043bb785799b7fa48ff62a0be8ddecb00b2cbaace7"
Oct 11 04:09:19 crc kubenswrapper[4798]: E1011 04:09:19.076277 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4718eb50346b78255b012043bb785799b7fa48ff62a0be8ddecb00b2cbaace7\": container with ID starting with f4718eb50346b78255b012043bb785799b7fa48ff62a0be8ddecb00b2cbaace7 not found: ID does not exist" containerID="f4718eb50346b78255b012043bb785799b7fa48ff62a0be8ddecb00b2cbaace7"
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.076326 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4718eb50346b78255b012043bb785799b7fa48ff62a0be8ddecb00b2cbaace7"} err="failed to get container status \"f4718eb50346b78255b012043bb785799b7fa48ff62a0be8ddecb00b2cbaace7\": rpc error: code = NotFound desc = could not find container \"f4718eb50346b78255b012043bb785799b7fa48ff62a0be8ddecb00b2cbaace7\": container with ID starting with f4718eb50346b78255b012043bb785799b7fa48ff62a0be8ddecb00b2cbaace7 not found: ID does not exist"
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.076351 4798 scope.go:117] "RemoveContainer" containerID="70c790cecdd1be2cf05db3510c8af04a01afe23ff43c7ef0f199e78c9ce0a00e"
Oct 11 04:09:19 crc kubenswrapper[4798]: E1011 04:09:19.076652 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70c790cecdd1be2cf05db3510c8af04a01afe23ff43c7ef0f199e78c9ce0a00e\": container with ID starting with 70c790cecdd1be2cf05db3510c8af04a01afe23ff43c7ef0f199e78c9ce0a00e not found: ID does not exist" containerID="70c790cecdd1be2cf05db3510c8af04a01afe23ff43c7ef0f199e78c9ce0a00e"
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.076700 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70c790cecdd1be2cf05db3510c8af04a01afe23ff43c7ef0f199e78c9ce0a00e"} err="failed to get container status \"70c790cecdd1be2cf05db3510c8af04a01afe23ff43c7ef0f199e78c9ce0a00e\": rpc error: code = NotFound desc = could not find container \"70c790cecdd1be2cf05db3510c8af04a01afe23ff43c7ef0f199e78c9ce0a00e\": container with ID starting with 70c790cecdd1be2cf05db3510c8af04a01afe23ff43c7ef0f199e78c9ce0a00e not found: ID does not exist"
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.179638 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-utilities\") pod \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\" (UID: \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\") "
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.179782 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-catalog-content\") pod \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\" (UID: \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\") "
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.179837 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pb5z\" (UniqueName: \"kubernetes.io/projected/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-kube-api-access-8pb5z\") pod \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\" (UID: \"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9\") "
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.180539 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-utilities" (OuterVolumeSpecName: "utilities") pod "09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" (UID: "09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.186210 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-kube-api-access-8pb5z" (OuterVolumeSpecName: "kube-api-access-8pb5z") pod "09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" (UID: "09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9"). InnerVolumeSpecName "kube-api-access-8pb5z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.274479 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" (UID: "09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.282380 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.282469 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pb5z\" (UniqueName: \"kubernetes.io/projected/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-kube-api-access-8pb5z\") on node \"crc\" DevicePath \"\""
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.282492 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9-utilities\") on node \"crc\" DevicePath \"\""
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.432022 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9821447b-4298-4a37-b842-d157b9b453c6" path="/var/lib/kubelet/pods/9821447b-4298-4a37-b842-d157b9b453c6/volumes"
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.980875 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m7ldp"
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.980861 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m7ldp" event={"ID":"09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9","Type":"ContainerDied","Data":"1820131e4e89f460615d99d78100b1de144bc8b3cfd5280b8f9294b3e790c4b4"}
Oct 11 04:09:19 crc kubenswrapper[4798]: I1011 04:09:19.980956 4798 scope.go:117] "RemoveContainer" containerID="6c003327b012b3473480336fca398d96e16190112c3aa5b9fedb9d84eb3047eb"
Oct 11 04:09:20 crc kubenswrapper[4798]: I1011 04:09:20.004969 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-m7ldp"]
Oct 11 04:09:20 crc kubenswrapper[4798]: I1011 04:09:20.006353 4798 scope.go:117] "RemoveContainer" containerID="a04a377cc8f223fb8a450c1787d84b5fdba69deb3645cfccd5974701b3546dd1"
Oct 11 04:09:20 crc kubenswrapper[4798]: I1011 04:09:20.010960 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-m7ldp"]
Oct 11 04:09:20 crc kubenswrapper[4798]: I1011 04:09:20.034710 4798 scope.go:117] "RemoveContainer" containerID="4a97859d8d96c11f44bcaabbabea627085d0ef182e07df68e5bc1f6acbb78c5b"
Oct 11 04:09:21 crc kubenswrapper[4798]: I1011 04:09:21.448553 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" path="/var/lib/kubelet/pods/09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9/volumes"
Oct 11 04:09:26 crc kubenswrapper[4798]: I1011 04:09:26.367697 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-7f5fc6b5ff-4zk9f"
Oct 11 04:09:26 crc kubenswrapper[4798]: I1011 04:09:26.731867 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-ddb98f99b-npmgn"
Oct 11 04:09:26 crc kubenswrapper[4798]: I1011 04:09:26.750792 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-74cb5cbc49-p96zk"
Oct 11 04:09:26 crc kubenswrapper[4798]: I1011 04:09:26.773859 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/manila-operator-controller-manager-59578bc799-7kfp8" Oct 11 04:09:27 crc kubenswrapper[4798]: I1011 04:09:27.138980 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:09:27 crc kubenswrapper[4798]: I1011 04:09:27.139053 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:09:27 crc kubenswrapper[4798]: I1011 04:09:27.404979 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-ffcdd6c94-4t4ft" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.020552 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-fjmqr"] Oct 11 04:09:42 crc kubenswrapper[4798]: E1011 04:09:42.022563 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9821447b-4298-4a37-b842-d157b9b453c6" containerName="extract-content" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.022623 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9821447b-4298-4a37-b842-d157b9b453c6" containerName="extract-content" Oct 11 04:09:42 crc kubenswrapper[4798]: E1011 04:09:42.022690 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9821447b-4298-4a37-b842-d157b9b453c6" containerName="extract-utilities" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.022702 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9821447b-4298-4a37-b842-d157b9b453c6" containerName="extract-utilities" Oct 11 04:09:42 crc kubenswrapper[4798]: E1011 04:09:42.022728 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" containerName="extract-content" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.022739 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" containerName="extract-content" Oct 11 04:09:42 crc kubenswrapper[4798]: E1011 04:09:42.022767 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9821447b-4298-4a37-b842-d157b9b453c6" containerName="registry-server" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.022776 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9821447b-4298-4a37-b842-d157b9b453c6" containerName="registry-server" Oct 11 04:09:42 crc kubenswrapper[4798]: E1011 04:09:42.022805 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" containerName="extract-utilities" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.022813 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" containerName="extract-utilities" Oct 11 04:09:42 crc kubenswrapper[4798]: E1011 04:09:42.022839 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" containerName="registry-server" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.022849 4798 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" containerName="registry-server" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.023090 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="9821447b-4298-4a37-b842-d157b9b453c6" containerName="registry-server" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.023111 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="09559c43-2ce0-4bcf-a5f5-b30a42d5fbd9" containerName="registry-server" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.024331 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-fjmqr" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.028467 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.028686 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.028886 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-dqlm7" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.029028 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.051216 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-fjmqr"] Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.098787 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-r2ws8"] Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.100427 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.106653 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.118299 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-r2ws8"] Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.138062 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pchjj\" (UniqueName: \"kubernetes.io/projected/a42a149b-4472-4ba5-b522-db08d1452296-kube-api-access-pchjj\") pod \"dnsmasq-dns-675f4bcbfc-fjmqr\" (UID: \"a42a149b-4472-4ba5-b522-db08d1452296\") " pod="openstack/dnsmasq-dns-675f4bcbfc-fjmqr" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.138115 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a42a149b-4472-4ba5-b522-db08d1452296-config\") pod \"dnsmasq-dns-675f4bcbfc-fjmqr\" (UID: \"a42a149b-4472-4ba5-b522-db08d1452296\") " pod="openstack/dnsmasq-dns-675f4bcbfc-fjmqr" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.240017 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a856b627-fcd6-4e90-be23-4f4712b5ac04-config\") pod \"dnsmasq-dns-78dd6ddcc-r2ws8\" (UID: \"a856b627-fcd6-4e90-be23-4f4712b5ac04\") " pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.240090 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pchjj\" (UniqueName: 
\"kubernetes.io/projected/a42a149b-4472-4ba5-b522-db08d1452296-kube-api-access-pchjj\") pod \"dnsmasq-dns-675f4bcbfc-fjmqr\" (UID: \"a42a149b-4472-4ba5-b522-db08d1452296\") " pod="openstack/dnsmasq-dns-675f4bcbfc-fjmqr" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.240116 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a856b627-fcd6-4e90-be23-4f4712b5ac04-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-r2ws8\" (UID: \"a856b627-fcd6-4e90-be23-4f4712b5ac04\") " pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.240503 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a42a149b-4472-4ba5-b522-db08d1452296-config\") pod \"dnsmasq-dns-675f4bcbfc-fjmqr\" (UID: \"a42a149b-4472-4ba5-b522-db08d1452296\") " pod="openstack/dnsmasq-dns-675f4bcbfc-fjmqr" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.240779 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rv75\" (UniqueName: \"kubernetes.io/projected/a856b627-fcd6-4e90-be23-4f4712b5ac04-kube-api-access-8rv75\") pod \"dnsmasq-dns-78dd6ddcc-r2ws8\" (UID: \"a856b627-fcd6-4e90-be23-4f4712b5ac04\") " pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.241465 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a42a149b-4472-4ba5-b522-db08d1452296-config\") pod \"dnsmasq-dns-675f4bcbfc-fjmqr\" (UID: \"a42a149b-4472-4ba5-b522-db08d1452296\") " pod="openstack/dnsmasq-dns-675f4bcbfc-fjmqr" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.270843 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pchjj\" (UniqueName: \"kubernetes.io/projected/a42a149b-4472-4ba5-b522-db08d1452296-kube-api-access-pchjj\") pod \"dnsmasq-dns-675f4bcbfc-fjmqr\" (UID: \"a42a149b-4472-4ba5-b522-db08d1452296\") " pod="openstack/dnsmasq-dns-675f4bcbfc-fjmqr" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.342313 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rv75\" (UniqueName: \"kubernetes.io/projected/a856b627-fcd6-4e90-be23-4f4712b5ac04-kube-api-access-8rv75\") pod \"dnsmasq-dns-78dd6ddcc-r2ws8\" (UID: \"a856b627-fcd6-4e90-be23-4f4712b5ac04\") " pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.342435 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a856b627-fcd6-4e90-be23-4f4712b5ac04-config\") pod \"dnsmasq-dns-78dd6ddcc-r2ws8\" (UID: \"a856b627-fcd6-4e90-be23-4f4712b5ac04\") " pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.342508 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a856b627-fcd6-4e90-be23-4f4712b5ac04-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-r2ws8\" (UID: \"a856b627-fcd6-4e90-be23-4f4712b5ac04\") " pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.343750 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a856b627-fcd6-4e90-be23-4f4712b5ac04-dns-svc\") pod 
\"dnsmasq-dns-78dd6ddcc-r2ws8\" (UID: \"a856b627-fcd6-4e90-be23-4f4712b5ac04\") " pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.343790 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a856b627-fcd6-4e90-be23-4f4712b5ac04-config\") pod \"dnsmasq-dns-78dd6ddcc-r2ws8\" (UID: \"a856b627-fcd6-4e90-be23-4f4712b5ac04\") " pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.350593 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-fjmqr" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.360328 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rv75\" (UniqueName: \"kubernetes.io/projected/a856b627-fcd6-4e90-be23-4f4712b5ac04-kube-api-access-8rv75\") pod \"dnsmasq-dns-78dd6ddcc-r2ws8\" (UID: \"a856b627-fcd6-4e90-be23-4f4712b5ac04\") " pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.418063 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.851225 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-fjmqr"] Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.861767 4798 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 04:09:42 crc kubenswrapper[4798]: I1011 04:09:42.934019 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-r2ws8"] Oct 11 04:09:43 crc kubenswrapper[4798]: I1011 04:09:43.207891 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" event={"ID":"a856b627-fcd6-4e90-be23-4f4712b5ac04","Type":"ContainerStarted","Data":"8d69e0555636e85a30347cddc58e79bede5f26e73257fb389656e775e7cfc1a7"} Oct 11 04:09:43 crc kubenswrapper[4798]: I1011 04:09:43.209476 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-fjmqr" event={"ID":"a42a149b-4472-4ba5-b522-db08d1452296","Type":"ContainerStarted","Data":"ead72681fe5962627f95b7939de50bb683ddb5aad3df0285fde06bebfaca9399"} Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.080189 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-fjmqr"] Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.107045 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-cg6s5"] Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.108302 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.122493 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-cg6s5"] Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.197819 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40f1f306-106b-4b2c-85bf-d97962d6db9a-config\") pod \"dnsmasq-dns-666b6646f7-cg6s5\" (UID: \"40f1f306-106b-4b2c-85bf-d97962d6db9a\") " pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.197879 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vl56m\" (UniqueName: \"kubernetes.io/projected/40f1f306-106b-4b2c-85bf-d97962d6db9a-kube-api-access-vl56m\") pod \"dnsmasq-dns-666b6646f7-cg6s5\" (UID: \"40f1f306-106b-4b2c-85bf-d97962d6db9a\") " pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.198058 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40f1f306-106b-4b2c-85bf-d97962d6db9a-dns-svc\") pod \"dnsmasq-dns-666b6646f7-cg6s5\" (UID: \"40f1f306-106b-4b2c-85bf-d97962d6db9a\") " pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.302827 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40f1f306-106b-4b2c-85bf-d97962d6db9a-config\") pod \"dnsmasq-dns-666b6646f7-cg6s5\" (UID: \"40f1f306-106b-4b2c-85bf-d97962d6db9a\") " pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.302923 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vl56m\" (UniqueName: \"kubernetes.io/projected/40f1f306-106b-4b2c-85bf-d97962d6db9a-kube-api-access-vl56m\") pod \"dnsmasq-dns-666b6646f7-cg6s5\" (UID: \"40f1f306-106b-4b2c-85bf-d97962d6db9a\") " pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.303019 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40f1f306-106b-4b2c-85bf-d97962d6db9a-dns-svc\") pod \"dnsmasq-dns-666b6646f7-cg6s5\" (UID: \"40f1f306-106b-4b2c-85bf-d97962d6db9a\") " pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.304057 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40f1f306-106b-4b2c-85bf-d97962d6db9a-config\") pod \"dnsmasq-dns-666b6646f7-cg6s5\" (UID: \"40f1f306-106b-4b2c-85bf-d97962d6db9a\") " pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.304168 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40f1f306-106b-4b2c-85bf-d97962d6db9a-dns-svc\") pod \"dnsmasq-dns-666b6646f7-cg6s5\" (UID: \"40f1f306-106b-4b2c-85bf-d97962d6db9a\") " pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.358893 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vl56m\" (UniqueName: 
\"kubernetes.io/projected/40f1f306-106b-4b2c-85bf-d97962d6db9a-kube-api-access-vl56m\") pod \"dnsmasq-dns-666b6646f7-cg6s5\" (UID: \"40f1f306-106b-4b2c-85bf-d97962d6db9a\") " pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.436083 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-r2ws8"] Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.452444 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.485199 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wcmbz"] Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.487134 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.528923 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wcmbz"] Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.614969 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-config\") pod \"dnsmasq-dns-57d769cc4f-wcmbz\" (UID: \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\") " pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.616131 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-wcmbz\" (UID: \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\") " pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.616444 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htmsh\" (UniqueName: \"kubernetes.io/projected/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-kube-api-access-htmsh\") pod \"dnsmasq-dns-57d769cc4f-wcmbz\" (UID: \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\") " pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.721520 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-config\") pod \"dnsmasq-dns-57d769cc4f-wcmbz\" (UID: \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\") " pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.722086 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-wcmbz\" (UID: \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\") " pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.722121 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htmsh\" (UniqueName: \"kubernetes.io/projected/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-kube-api-access-htmsh\") pod \"dnsmasq-dns-57d769cc4f-wcmbz\" (UID: \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\") " pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.723627 4798 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-wcmbz\" (UID: \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\") " pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.723622 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-config\") pod \"dnsmasq-dns-57d769cc4f-wcmbz\" (UID: \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\") " pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.771733 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htmsh\" (UniqueName: \"kubernetes.io/projected/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-kube-api-access-htmsh\") pod \"dnsmasq-dns-57d769cc4f-wcmbz\" (UID: \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\") " pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" Oct 11 04:09:45 crc kubenswrapper[4798]: I1011 04:09:45.865078 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.186412 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-cg6s5"] Oct 11 04:09:46 crc kubenswrapper[4798]: W1011 04:09:46.221811 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40f1f306_106b_4b2c_85bf_d97962d6db9a.slice/crio-2ef3b98679dd57042132a79492753e0403ee66cb23b99d67b6b41c0208ebd63a WatchSource:0}: Error finding container 2ef3b98679dd57042132a79492753e0403ee66cb23b99d67b6b41c0208ebd63a: Status 404 returned error can't find the container with id 2ef3b98679dd57042132a79492753e0403ee66cb23b99d67b6b41c0208ebd63a Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.258162 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.259731 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.266258 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.266949 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.267852 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.268098 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.268440 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.268666 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.271659 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-52nsp" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.278894 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.285489 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" event={"ID":"40f1f306-106b-4b2c-85bf-d97962d6db9a","Type":"ContainerStarted","Data":"2ef3b98679dd57042132a79492753e0403ee66cb23b99d67b6b41c0208ebd63a"} Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.446213 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-server-conf\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.446270 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.446300 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.446325 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.446363 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: 
\"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.446420 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/eff19131-d13b-495f-94a3-097e030fd3cd-pod-info\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.446443 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-config-data\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.446465 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.446482 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.447297 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/eff19131-d13b-495f-94a3-097e030fd3cd-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.447322 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjb9j\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-kube-api-access-wjb9j\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.527297 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wcmbz"] Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.548633 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-server-conf\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.548691 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.548725 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" 
(UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.548753 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.548791 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.548834 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/eff19131-d13b-495f-94a3-097e030fd3cd-pod-info\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.548857 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-config-data\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.548879 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.548899 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.548921 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/eff19131-d13b-495f-94a3-097e030fd3cd-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.548944 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjb9j\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-kube-api-access-wjb9j\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.549417 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.549832 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.550074 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-server-conf\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.550069 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.550098 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.550771 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-config-data\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.556153 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.558947 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.560299 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/eff19131-d13b-495f-94a3-097e030fd3cd-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.562927 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/eff19131-d13b-495f-94a3-097e030fd3cd-pod-info\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.573849 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjb9j\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-kube-api-access-wjb9j\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.576141 4798 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.587048 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.616012 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.618063 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.621676 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.621738 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-qcx8n" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.621683 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.621898 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.622162 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.622700 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.623061 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.645580 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.753807 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.754519 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.754638 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.754683 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57jnv\" (UniqueName: 
\"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-kube-api-access-57jnv\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.754712 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f7b18bbb-aead-4356-99b3-f1ee253f86e8-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.754740 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.754787 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.754904 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f7b18bbb-aead-4356-99b3-f1ee253f86e8-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.754927 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.754995 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.755058 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.858182 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.858270 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/f7b18bbb-aead-4356-99b3-f1ee253f86e8-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.858290 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.858309 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.858352 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.858372 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.858410 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.858439 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.858874 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.858894 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57jnv\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-kube-api-access-57jnv\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.858914 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f7b18bbb-aead-4356-99b3-f1ee253f86e8-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.858933 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.859750 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.859779 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.860270 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.861611 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.866928 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.867580 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f7b18bbb-aead-4356-99b3-f1ee253f86e8-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.869299 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.869546 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.887841 4798 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.893997 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57jnv\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-kube-api-access-57jnv\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.895286 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f7b18bbb-aead-4356-99b3-f1ee253f86e8-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:46 crc kubenswrapper[4798]: I1011 04:09:46.950032 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:09:47 crc kubenswrapper[4798]: I1011 04:09:47.975215 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Oct 11 04:09:47 crc kubenswrapper[4798]: I1011 04:09:47.978638 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Oct 11 04:09:47 crc kubenswrapper[4798]: I1011 04:09:47.986490 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-7cmjf" Oct 11 04:09:47 crc kubenswrapper[4798]: I1011 04:09:47.987120 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Oct 11 04:09:47 crc kubenswrapper[4798]: I1011 04:09:47.987753 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Oct 11 04:09:47 crc kubenswrapper[4798]: I1011 04:09:47.987765 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Oct 11 04:09:47 crc kubenswrapper[4798]: I1011 04:09:47.990700 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Oct 11 04:09:47 crc kubenswrapper[4798]: I1011 04:09:47.991239 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Oct 11 04:09:47 crc kubenswrapper[4798]: I1011 04:09:47.991921 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.079915 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/54e4a0f3-c43f-418e-b105-fd19db3ce615-kolla-config\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.079985 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/54e4a0f3-c43f-418e-b105-fd19db3ce615-config-data-default\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.080312 4798 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/54e4a0f3-c43f-418e-b105-fd19db3ce615-config-data-generated\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.080431 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/54e4a0f3-c43f-418e-b105-fd19db3ce615-operator-scripts\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.080466 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/54e4a0f3-c43f-418e-b105-fd19db3ce615-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.080502 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.080688 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54e4a0f3-c43f-418e-b105-fd19db3ce615-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.080847 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/54e4a0f3-c43f-418e-b105-fd19db3ce615-secrets\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.080885 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqsq6\" (UniqueName: \"kubernetes.io/projected/54e4a0f3-c43f-418e-b105-fd19db3ce615-kube-api-access-fqsq6\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.183117 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/54e4a0f3-c43f-418e-b105-fd19db3ce615-config-data-generated\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.183190 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/54e4a0f3-c43f-418e-b105-fd19db3ce615-operator-scripts\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.183226 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/54e4a0f3-c43f-418e-b105-fd19db3ce615-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.183894 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/54e4a0f3-c43f-418e-b105-fd19db3ce615-config-data-generated\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.183963 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.184315 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.184440 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54e4a0f3-c43f-418e-b105-fd19db3ce615-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.184997 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqsq6\" (UniqueName: \"kubernetes.io/projected/54e4a0f3-c43f-418e-b105-fd19db3ce615-kube-api-access-fqsq6\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.185035 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/54e4a0f3-c43f-418e-b105-fd19db3ce615-secrets\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.185100 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/54e4a0f3-c43f-418e-b105-fd19db3ce615-kolla-config\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.185175 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/54e4a0f3-c43f-418e-b105-fd19db3ce615-config-data-default\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.186515 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/54e4a0f3-c43f-418e-b105-fd19db3ce615-operator-scripts\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: 
I1011 04:09:48.187121 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/54e4a0f3-c43f-418e-b105-fd19db3ce615-config-data-default\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.187931 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/54e4a0f3-c43f-418e-b105-fd19db3ce615-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.188138 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/54e4a0f3-c43f-418e-b105-fd19db3ce615-kolla-config\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.192704 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/54e4a0f3-c43f-418e-b105-fd19db3ce615-secrets\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.192854 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54e4a0f3-c43f-418e-b105-fd19db3ce615-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.208786 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqsq6\" (UniqueName: \"kubernetes.io/projected/54e4a0f3-c43f-418e-b105-fd19db3ce615-kube-api-access-fqsq6\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.280108 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"54e4a0f3-c43f-418e-b105-fd19db3ce615\") " pod="openstack/openstack-galera-0" Oct 11 04:09:48 crc kubenswrapper[4798]: I1011 04:09:48.327153 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.421274 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.424218 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.429678 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.429775 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.429798 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-xsjh7" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.430664 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.469551 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.517012 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d1fad00-1405-4149-96d6-7ef60d34c4f1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.517097 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md87m\" (UniqueName: \"kubernetes.io/projected/2d1fad00-1405-4149-96d6-7ef60d34c4f1-kube-api-access-md87m\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.517159 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2d1fad00-1405-4149-96d6-7ef60d34c4f1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.517236 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/2d1fad00-1405-4149-96d6-7ef60d34c4f1-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.517264 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d1fad00-1405-4149-96d6-7ef60d34c4f1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.517314 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2d1fad00-1405-4149-96d6-7ef60d34c4f1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.517342 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/2d1fad00-1405-4149-96d6-7ef60d34c4f1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.517503 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.517561 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2d1fad00-1405-4149-96d6-7ef60d34c4f1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.621264 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.621354 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2d1fad00-1405-4149-96d6-7ef60d34c4f1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.621442 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d1fad00-1405-4149-96d6-7ef60d34c4f1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.621469 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-md87m\" (UniqueName: \"kubernetes.io/projected/2d1fad00-1405-4149-96d6-7ef60d34c4f1-kube-api-access-md87m\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.621542 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2d1fad00-1405-4149-96d6-7ef60d34c4f1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.621769 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/2d1fad00-1405-4149-96d6-7ef60d34c4f1-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.621793 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d1fad00-1405-4149-96d6-7ef60d34c4f1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: 
\"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.621824 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2d1fad00-1405-4149-96d6-7ef60d34c4f1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.621871 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d1fad00-1405-4149-96d6-7ef60d34c4f1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.621901 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.623531 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d1fad00-1405-4149-96d6-7ef60d34c4f1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.623948 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2d1fad00-1405-4149-96d6-7ef60d34c4f1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.624371 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2d1fad00-1405-4149-96d6-7ef60d34c4f1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.628840 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2d1fad00-1405-4149-96d6-7ef60d34c4f1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.638446 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d1fad00-1405-4149-96d6-7ef60d34c4f1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.638667 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/2d1fad00-1405-4149-96d6-7ef60d34c4f1-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 
04:09:49.650292 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-md87m\" (UniqueName: \"kubernetes.io/projected/2d1fad00-1405-4149-96d6-7ef60d34c4f1-kube-api-access-md87m\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.652192 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d1fad00-1405-4149-96d6-7ef60d34c4f1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.683016 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2d1fad00-1405-4149-96d6-7ef60d34c4f1\") " pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.756434 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.889575 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.890538 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.892247 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-prl66" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.892418 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.892655 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.904441 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.927943 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a3433fc1-acd1-4b1e-9df2-578848000615-kolla-config\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.928005 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/a3433fc1-acd1-4b1e-9df2-578848000615-memcached-tls-certs\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.928121 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a3433fc1-acd1-4b1e-9df2-578848000615-config-data\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.928144 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvhcv\" (UniqueName: 
\"kubernetes.io/projected/a3433fc1-acd1-4b1e-9df2-578848000615-kube-api-access-gvhcv\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:49 crc kubenswrapper[4798]: I1011 04:09:49.928284 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3433fc1-acd1-4b1e-9df2-578848000615-combined-ca-bundle\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:50 crc kubenswrapper[4798]: I1011 04:09:50.029565 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a3433fc1-acd1-4b1e-9df2-578848000615-config-data\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:50 crc kubenswrapper[4798]: I1011 04:09:50.029611 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvhcv\" (UniqueName: \"kubernetes.io/projected/a3433fc1-acd1-4b1e-9df2-578848000615-kube-api-access-gvhcv\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:50 crc kubenswrapper[4798]: I1011 04:09:50.029642 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3433fc1-acd1-4b1e-9df2-578848000615-combined-ca-bundle\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:50 crc kubenswrapper[4798]: I1011 04:09:50.029694 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a3433fc1-acd1-4b1e-9df2-578848000615-kolla-config\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:50 crc kubenswrapper[4798]: I1011 04:09:50.029714 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/a3433fc1-acd1-4b1e-9df2-578848000615-memcached-tls-certs\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:50 crc kubenswrapper[4798]: I1011 04:09:50.030421 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a3433fc1-acd1-4b1e-9df2-578848000615-config-data\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:50 crc kubenswrapper[4798]: I1011 04:09:50.030701 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a3433fc1-acd1-4b1e-9df2-578848000615-kolla-config\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:50 crc kubenswrapper[4798]: I1011 04:09:50.033556 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/a3433fc1-acd1-4b1e-9df2-578848000615-memcached-tls-certs\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:50 crc kubenswrapper[4798]: I1011 04:09:50.048578 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/a3433fc1-acd1-4b1e-9df2-578848000615-combined-ca-bundle\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:50 crc kubenswrapper[4798]: I1011 04:09:50.056845 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvhcv\" (UniqueName: \"kubernetes.io/projected/a3433fc1-acd1-4b1e-9df2-578848000615-kube-api-access-gvhcv\") pod \"memcached-0\" (UID: \"a3433fc1-acd1-4b1e-9df2-578848000615\") " pod="openstack/memcached-0" Oct 11 04:09:50 crc kubenswrapper[4798]: I1011 04:09:50.217847 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Oct 11 04:09:50 crc kubenswrapper[4798]: W1011 04:09:50.971988 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podef82cf1a_27d8_4ce1_82ca_5143fb76af9a.slice/crio-0d64bad6f40466eda84f07a86d4a705b6b13e44710789e4d585dcd8269fb286b WatchSource:0}: Error finding container 0d64bad6f40466eda84f07a86d4a705b6b13e44710789e4d585dcd8269fb286b: Status 404 returned error can't find the container with id 0d64bad6f40466eda84f07a86d4a705b6b13e44710789e4d585dcd8269fb286b Oct 11 04:09:51 crc kubenswrapper[4798]: I1011 04:09:51.328451 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" event={"ID":"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a","Type":"ContainerStarted","Data":"0d64bad6f40466eda84f07a86d4a705b6b13e44710789e4d585dcd8269fb286b"} Oct 11 04:09:51 crc kubenswrapper[4798]: I1011 04:09:51.530459 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 04:09:51 crc kubenswrapper[4798]: I1011 04:09:51.531862 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 11 04:09:51 crc kubenswrapper[4798]: I1011 04:09:51.544535 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-896fm" Oct 11 04:09:51 crc kubenswrapper[4798]: I1011 04:09:51.562820 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 04:09:51 crc kubenswrapper[4798]: I1011 04:09:51.670298 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbqfg\" (UniqueName: \"kubernetes.io/projected/6ff4b0aa-8ae0-4301-8bb1-6978cfa47207-kube-api-access-pbqfg\") pod \"kube-state-metrics-0\" (UID: \"6ff4b0aa-8ae0-4301-8bb1-6978cfa47207\") " pod="openstack/kube-state-metrics-0" Oct 11 04:09:51 crc kubenswrapper[4798]: I1011 04:09:51.772628 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbqfg\" (UniqueName: \"kubernetes.io/projected/6ff4b0aa-8ae0-4301-8bb1-6978cfa47207-kube-api-access-pbqfg\") pod \"kube-state-metrics-0\" (UID: \"6ff4b0aa-8ae0-4301-8bb1-6978cfa47207\") " pod="openstack/kube-state-metrics-0" Oct 11 04:09:51 crc kubenswrapper[4798]: I1011 04:09:51.792568 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbqfg\" (UniqueName: \"kubernetes.io/projected/6ff4b0aa-8ae0-4301-8bb1-6978cfa47207-kube-api-access-pbqfg\") pod \"kube-state-metrics-0\" (UID: \"6ff4b0aa-8ae0-4301-8bb1-6978cfa47207\") " pod="openstack/kube-state-metrics-0" Oct 11 04:09:51 crc kubenswrapper[4798]: I1011 04:09:51.855616 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.419415 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-sv678"] Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.420767 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.423268 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.423490 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-qsmbm" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.423763 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.462631 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-sv678"] Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.484420 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-tk4gr"] Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.498027 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-tk4gr"] Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.498226 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.542764 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-etc-ovs\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.542813 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-var-lib\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.542849 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3e260c01-682b-4b79-9ebe-c06c29750bfe-var-log-ovn\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.542872 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e260c01-682b-4b79-9ebe-c06c29750bfe-var-run-ovn\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.542935 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-var-run\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 
04:09:55.542963 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3e260c01-682b-4b79-9ebe-c06c29750bfe-var-run\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.542986 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e260c01-682b-4b79-9ebe-c06c29750bfe-combined-ca-bundle\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.543039 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e260c01-682b-4b79-9ebe-c06c29750bfe-ovn-controller-tls-certs\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.543059 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-var-log\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.543082 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3e260c01-682b-4b79-9ebe-c06c29750bfe-scripts\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.543107 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rm66n\" (UniqueName: \"kubernetes.io/projected/3e260c01-682b-4b79-9ebe-c06c29750bfe-kube-api-access-rm66n\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.543126 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-scripts\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.543144 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dg588\" (UniqueName: \"kubernetes.io/projected/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-kube-api-access-dg588\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.645350 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3e260c01-682b-4b79-9ebe-c06c29750bfe-var-log-ovn\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.645433 4798 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e260c01-682b-4b79-9ebe-c06c29750bfe-var-run-ovn\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.645474 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-var-run\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.645513 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3e260c01-682b-4b79-9ebe-c06c29750bfe-var-run\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.645551 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e260c01-682b-4b79-9ebe-c06c29750bfe-combined-ca-bundle\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.645594 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-var-log\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.645617 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e260c01-682b-4b79-9ebe-c06c29750bfe-ovn-controller-tls-certs\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.645651 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3e260c01-682b-4b79-9ebe-c06c29750bfe-scripts\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.645688 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rm66n\" (UniqueName: \"kubernetes.io/projected/3e260c01-682b-4b79-9ebe-c06c29750bfe-kube-api-access-rm66n\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.645717 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-scripts\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.645744 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dg588\" (UniqueName: \"kubernetes.io/projected/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-kube-api-access-dg588\") pod 
\"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.645793 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-etc-ovs\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.645828 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-var-lib\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.646526 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-var-log\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.646724 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-etc-ovs\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.646813 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3e260c01-682b-4b79-9ebe-c06c29750bfe-var-log-ovn\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.646843 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-var-run\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.647107 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3e260c01-682b-4b79-9ebe-c06c29750bfe-var-run\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.647122 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e260c01-682b-4b79-9ebe-c06c29750bfe-var-run-ovn\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.647437 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-var-lib\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.648842 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/3e260c01-682b-4b79-9ebe-c06c29750bfe-scripts\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.650221 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-scripts\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.651153 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e260c01-682b-4b79-9ebe-c06c29750bfe-combined-ca-bundle\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.654448 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e260c01-682b-4b79-9ebe-c06c29750bfe-ovn-controller-tls-certs\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.662818 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rm66n\" (UniqueName: \"kubernetes.io/projected/3e260c01-682b-4b79-9ebe-c06c29750bfe-kube-api-access-rm66n\") pod \"ovn-controller-sv678\" (UID: \"3e260c01-682b-4b79-9ebe-c06c29750bfe\") " pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.668289 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dg588\" (UniqueName: \"kubernetes.io/projected/7f9da136-ab4f-4a2e-99e4-e546aa4220f9-kube-api-access-dg588\") pod \"ovn-controller-ovs-tk4gr\" (UID: \"7f9da136-ab4f-4a2e-99e4-e546aa4220f9\") " pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.759686 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-sv678" Oct 11 04:09:55 crc kubenswrapper[4798]: I1011 04:09:55.830233 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.302528 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.308100 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.311195 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.311787 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.312453 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.312600 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-tzxxr" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.312734 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.320103 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.379031 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.379178 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.379259 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.379286 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.379326 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.379345 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lj6px\" (UniqueName: \"kubernetes.io/projected/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-kube-api-access-lj6px\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.379430 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.379453 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-config\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.481540 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-config\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.481637 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.481662 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.483155 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.483219 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.483269 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.483704 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.484110 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.484148 4798 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lj6px\" (UniqueName: \"kubernetes.io/projected/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-kube-api-access-lj6px\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.484346 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.484525 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.484596 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-config\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.486834 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.503383 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.505894 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.517269 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lj6px\" (UniqueName: \"kubernetes.io/projected/5fcb3fcb-1839-493f-b485-0bb9ea061bc2-kube-api-access-lj6px\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.518757 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5fcb3fcb-1839-493f-b485-0bb9ea061bc2\") " pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:56 crc kubenswrapper[4798]: I1011 04:09:56.633515 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 11 04:09:57 crc kubenswrapper[4798]: I1011 04:09:57.138796 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:09:57 crc kubenswrapper[4798]: I1011 04:09:57.138886 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.113703 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.116123 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.120056 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-bjq8k" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.123082 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.123226 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.124606 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.134434 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.214856 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/76d0b2fe-e594-4a66-8358-d3aed19300ea-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.214903 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/76d0b2fe-e594-4a66-8358-d3aed19300ea-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.214929 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76d0b2fe-e594-4a66-8358-d3aed19300ea-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.214948 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/76d0b2fe-e594-4a66-8358-d3aed19300ea-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 
04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.215361 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76d0b2fe-e594-4a66-8358-d3aed19300ea-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.215519 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76d0b2fe-e594-4a66-8358-d3aed19300ea-config\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.215631 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.215656 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzd6g\" (UniqueName: \"kubernetes.io/projected/76d0b2fe-e594-4a66-8358-d3aed19300ea-kube-api-access-hzd6g\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.316891 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/76d0b2fe-e594-4a66-8358-d3aed19300ea-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.316945 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/76d0b2fe-e594-4a66-8358-d3aed19300ea-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.316983 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76d0b2fe-e594-4a66-8358-d3aed19300ea-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.317000 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/76d0b2fe-e594-4a66-8358-d3aed19300ea-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.317072 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76d0b2fe-e594-4a66-8358-d3aed19300ea-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.317099 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/76d0b2fe-e594-4a66-8358-d3aed19300ea-config\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.317127 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.317144 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzd6g\" (UniqueName: \"kubernetes.io/projected/76d0b2fe-e594-4a66-8358-d3aed19300ea-kube-api-access-hzd6g\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.317580 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/76d0b2fe-e594-4a66-8358-d3aed19300ea-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.318650 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.318793 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76d0b2fe-e594-4a66-8358-d3aed19300ea-config\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.318898 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76d0b2fe-e594-4a66-8358-d3aed19300ea-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.322305 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/76d0b2fe-e594-4a66-8358-d3aed19300ea-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.322643 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/76d0b2fe-e594-4a66-8358-d3aed19300ea-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.326558 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76d0b2fe-e594-4a66-8358-d3aed19300ea-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.337290 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-hzd6g\" (UniqueName: \"kubernetes.io/projected/76d0b2fe-e594-4a66-8358-d3aed19300ea-kube-api-access-hzd6g\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.345661 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"76d0b2fe-e594-4a66-8358-d3aed19300ea\") " pod="openstack/ovsdbserver-sb-0" Oct 11 04:09:58 crc kubenswrapper[4798]: I1011 04:09:58.459104 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 11 04:10:01 crc kubenswrapper[4798]: E1011 04:10:01.351097 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Oct 11 04:10:01 crc kubenswrapper[4798]: E1011 04:10:01.351540 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8rv75,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-r2ws8_openstack(a856b627-fcd6-4e90-be23-4f4712b5ac04): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 11 04:10:01 crc kubenswrapper[4798]: E1011 04:10:01.352408 4798 log.go:32] "PullImage from image service 
failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Oct 11 04:10:01 crc kubenswrapper[4798]: E1011 04:10:01.352496 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pchjj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-fjmqr_openstack(a42a149b-4472-4ba5-b522-db08d1452296): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 11 04:10:01 crc kubenswrapper[4798]: E1011 04:10:01.352716 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" podUID="a856b627-fcd6-4e90-be23-4f4712b5ac04" Oct 11 04:10:01 crc kubenswrapper[4798]: E1011 04:10:01.353642 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-fjmqr" podUID="a42a149b-4472-4ba5-b522-db08d1452296" Oct 11 04:10:01 crc kubenswrapper[4798]: I1011 04:10:01.882815 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Oct 11 04:10:01 crc kubenswrapper[4798]: I1011 04:10:01.999147 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 04:10:02 crc kubenswrapper[4798]: W1011 04:10:02.010953 4798 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3433fc1_acd1_4b1e_9df2_578848000615.slice/crio-db3be4f68b1042f571edd266025eabf19341b9c779ae1c23c3ea0d9181fd1e4f WatchSource:0}: Error finding container db3be4f68b1042f571edd266025eabf19341b9c779ae1c23c3ea0d9181fd1e4f: Status 404 returned error can't find the container with id db3be4f68b1042f571edd266025eabf19341b9c779ae1c23c3ea0d9181fd1e4f Oct 11 04:10:02 crc kubenswrapper[4798]: W1011 04:10:02.042218 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeff19131_d13b_495f_94a3_097e030fd3cd.slice/crio-f5cf56969a3e007e59fdcf2b0d2eec2da5d53d593d125796e2a8c0050bfe6934 WatchSource:0}: Error finding container f5cf56969a3e007e59fdcf2b0d2eec2da5d53d593d125796e2a8c0050bfe6934: Status 404 returned error can't find the container with id f5cf56969a3e007e59fdcf2b0d2eec2da5d53d593d125796e2a8c0050bfe6934 Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.047805 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-fjmqr" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.056267 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.093955 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pchjj\" (UniqueName: \"kubernetes.io/projected/a42a149b-4472-4ba5-b522-db08d1452296-kube-api-access-pchjj\") pod \"a42a149b-4472-4ba5-b522-db08d1452296\" (UID: \"a42a149b-4472-4ba5-b522-db08d1452296\") " Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.094074 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a856b627-fcd6-4e90-be23-4f4712b5ac04-config\") pod \"a856b627-fcd6-4e90-be23-4f4712b5ac04\" (UID: \"a856b627-fcd6-4e90-be23-4f4712b5ac04\") " Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.094100 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a42a149b-4472-4ba5-b522-db08d1452296-config\") pod \"a42a149b-4472-4ba5-b522-db08d1452296\" (UID: \"a42a149b-4472-4ba5-b522-db08d1452296\") " Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.094128 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a856b627-fcd6-4e90-be23-4f4712b5ac04-dns-svc\") pod \"a856b627-fcd6-4e90-be23-4f4712b5ac04\" (UID: \"a856b627-fcd6-4e90-be23-4f4712b5ac04\") " Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.094146 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rv75\" (UniqueName: \"kubernetes.io/projected/a856b627-fcd6-4e90-be23-4f4712b5ac04-kube-api-access-8rv75\") pod \"a856b627-fcd6-4e90-be23-4f4712b5ac04\" (UID: \"a856b627-fcd6-4e90-be23-4f4712b5ac04\") " Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.094695 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a42a149b-4472-4ba5-b522-db08d1452296-config" (OuterVolumeSpecName: "config") pod "a42a149b-4472-4ba5-b522-db08d1452296" (UID: "a42a149b-4472-4ba5-b522-db08d1452296"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.094997 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a856b627-fcd6-4e90-be23-4f4712b5ac04-config" (OuterVolumeSpecName: "config") pod "a856b627-fcd6-4e90-be23-4f4712b5ac04" (UID: "a856b627-fcd6-4e90-be23-4f4712b5ac04"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.095030 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a856b627-fcd6-4e90-be23-4f4712b5ac04-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a856b627-fcd6-4e90-be23-4f4712b5ac04" (UID: "a856b627-fcd6-4e90-be23-4f4712b5ac04"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.100284 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a856b627-fcd6-4e90-be23-4f4712b5ac04-kube-api-access-8rv75" (OuterVolumeSpecName: "kube-api-access-8rv75") pod "a856b627-fcd6-4e90-be23-4f4712b5ac04" (UID: "a856b627-fcd6-4e90-be23-4f4712b5ac04"). InnerVolumeSpecName "kube-api-access-8rv75". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.100755 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a42a149b-4472-4ba5-b522-db08d1452296-kube-api-access-pchjj" (OuterVolumeSpecName: "kube-api-access-pchjj") pod "a42a149b-4472-4ba5-b522-db08d1452296" (UID: "a42a149b-4472-4ba5-b522-db08d1452296"). InnerVolumeSpecName "kube-api-access-pchjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.197139 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a856b627-fcd6-4e90-be23-4f4712b5ac04-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.197202 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a42a149b-4472-4ba5-b522-db08d1452296-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.197217 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a856b627-fcd6-4e90-be23-4f4712b5ac04-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.197234 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rv75\" (UniqueName: \"kubernetes.io/projected/a856b627-fcd6-4e90-be23-4f4712b5ac04-kube-api-access-8rv75\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.197598 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pchjj\" (UniqueName: \"kubernetes.io/projected/a42a149b-4472-4ba5-b522-db08d1452296-kube-api-access-pchjj\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.248353 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.255610 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 11 04:10:02 crc kubenswrapper[4798]: W1011 04:10:02.263730 4798 manager.go:1169] Failed to 
process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d1fad00_1405_4149_96d6_7ef60d34c4f1.slice/crio-703ff4aa0b17acbdb0a6ede387bc15de991af5265d1ceefff46408701332f53c WatchSource:0}: Error finding container 703ff4aa0b17acbdb0a6ede387bc15de991af5265d1ceefff46408701332f53c: Status 404 returned error can't find the container with id 703ff4aa0b17acbdb0a6ede387bc15de991af5265d1ceefff46408701332f53c Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.353582 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.428962 4798 generic.go:334] "Generic (PLEG): container finished" podID="40f1f306-106b-4b2c-85bf-d97962d6db9a" containerID="3a3589820b69b86a23d3f859b412e6e69456a460fff096a87f4593d7ab4a1d61" exitCode=0 Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.429025 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" event={"ID":"40f1f306-106b-4b2c-85bf-d97962d6db9a","Type":"ContainerDied","Data":"3a3589820b69b86a23d3f859b412e6e69456a460fff096a87f4593d7ab4a1d61"} Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.432779 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-fjmqr" event={"ID":"a42a149b-4472-4ba5-b522-db08d1452296","Type":"ContainerDied","Data":"ead72681fe5962627f95b7939de50bb683ddb5aad3df0285fde06bebfaca9399"} Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.432857 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-fjmqr" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.441190 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"a3433fc1-acd1-4b1e-9df2-578848000615","Type":"ContainerStarted","Data":"db3be4f68b1042f571edd266025eabf19341b9c779ae1c23c3ea0d9181fd1e4f"} Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.453479 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" event={"ID":"a856b627-fcd6-4e90-be23-4f4712b5ac04","Type":"ContainerDied","Data":"8d69e0555636e85a30347cddc58e79bede5f26e73257fb389656e775e7cfc1a7"} Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.453613 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-r2ws8" Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.479653 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"6ff4b0aa-8ae0-4301-8bb1-6978cfa47207","Type":"ContainerStarted","Data":"e3c860f179de47cf2aa2aaee0b00ce3c6aa9de3a3cc91d5bbdb78390368fc4b0"} Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.481663 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.483286 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"76d0b2fe-e594-4a66-8358-d3aed19300ea","Type":"ContainerStarted","Data":"e1fc7a3a855e93812c97fc2b739339de142bc34f47d293dcc0ae4e73458ebe5b"} Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.489994 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"eff19131-d13b-495f-94a3-097e030fd3cd","Type":"ContainerStarted","Data":"f5cf56969a3e007e59fdcf2b0d2eec2da5d53d593d125796e2a8c0050bfe6934"} Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.498611 4798 generic.go:334] "Generic (PLEG): container finished" podID="ef82cf1a-27d8-4ce1-82ca-5143fb76af9a" containerID="69416d927d550b02a94e02630f0db4c392c65866d74faaa40915b68db26abbdb" exitCode=0 Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.498836 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" event={"ID":"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a","Type":"ContainerDied","Data":"69416d927d550b02a94e02630f0db4c392c65866d74faaa40915b68db26abbdb"} Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.504181 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2d1fad00-1405-4149-96d6-7ef60d34c4f1","Type":"ContainerStarted","Data":"703ff4aa0b17acbdb0a6ede387bc15de991af5265d1ceefff46408701332f53c"} Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.521178 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-sv678"] Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.528659 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.680385 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.803836 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-r2ws8"] Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.813634 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-r2ws8"] Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.881083 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-fjmqr"] Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.887072 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-fjmqr"] Oct 11 04:10:02 crc kubenswrapper[4798]: I1011 04:10:02.915537 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-tk4gr"] Oct 11 04:10:02 crc kubenswrapper[4798]: E1011 04:10:02.931432 4798 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Oct 11 04:10:02 crc kubenswrapper[4798]: rpc error: code = Unknown desc = container create failed: mount 
`/var/lib/kubelet/pods/40f1f306-106b-4b2c-85bf-d97962d6db9a/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Oct 11 04:10:02 crc kubenswrapper[4798]: > podSandboxID="2ef3b98679dd57042132a79492753e0403ee66cb23b99d67b6b41c0208ebd63a" Oct 11 04:10:02 crc kubenswrapper[4798]: E1011 04:10:02.931641 4798 kuberuntime_manager.go:1274] "Unhandled Error" err=< Oct 11 04:10:02 crc kubenswrapper[4798]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vl56m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-cg6s5_openstack(40f1f306-106b-4b2c-85bf-d97962d6db9a): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/40f1f306-106b-4b2c-85bf-d97962d6db9a/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Oct 11 04:10:02 crc kubenswrapper[4798]: > logger="UnhandledError" Oct 11 04:10:02 crc kubenswrapper[4798]: E1011 04:10:02.932787 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/40f1f306-106b-4b2c-85bf-d97962d6db9a/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" podUID="40f1f306-106b-4b2c-85bf-d97962d6db9a" Oct 11 04:10:02 crc kubenswrapper[4798]: W1011 04:10:02.982545 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7f9da136_ab4f_4a2e_99e4_e546aa4220f9.slice/crio-2db7ab039bb820ec730fb5cd240c18896334ac04fe03b71c6d6207de368a9b4b WatchSource:0}: Error finding container 2db7ab039bb820ec730fb5cd240c18896334ac04fe03b71c6d6207de368a9b4b: Status 404 returned error can't find the container with id 2db7ab039bb820ec730fb5cd240c18896334ac04fe03b71c6d6207de368a9b4b Oct 11 04:10:03 crc kubenswrapper[4798]: I1011 04:10:03.452634 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a42a149b-4472-4ba5-b522-db08d1452296" path="/var/lib/kubelet/pods/a42a149b-4472-4ba5-b522-db08d1452296/volumes" Oct 11 04:10:03 crc kubenswrapper[4798]: I1011 04:10:03.453491 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a856b627-fcd6-4e90-be23-4f4712b5ac04" path="/var/lib/kubelet/pods/a856b627-fcd6-4e90-be23-4f4712b5ac04/volumes" Oct 11 04:10:03 crc kubenswrapper[4798]: I1011 04:10:03.520588 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tk4gr" event={"ID":"7f9da136-ab4f-4a2e-99e4-e546aa4220f9","Type":"ContainerStarted","Data":"2db7ab039bb820ec730fb5cd240c18896334ac04fe03b71c6d6207de368a9b4b"} Oct 11 04:10:03 crc kubenswrapper[4798]: I1011 04:10:03.523130 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5fcb3fcb-1839-493f-b485-0bb9ea061bc2","Type":"ContainerStarted","Data":"ddbc74ba0ae817d5f1671435aa0e9b9ad7acc3be77a51c576f7f7857bbd20e11"} Oct 11 04:10:03 crc kubenswrapper[4798]: I1011 04:10:03.529035 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" event={"ID":"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a","Type":"ContainerStarted","Data":"de0f0f6e306be045068b75d455a1a4e29ab065a7e48d0a8af6d89c0145f76353"} Oct 11 04:10:03 crc kubenswrapper[4798]: I1011 04:10:03.529239 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" Oct 11 04:10:03 crc kubenswrapper[4798]: I1011 04:10:03.531106 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f7b18bbb-aead-4356-99b3-f1ee253f86e8","Type":"ContainerStarted","Data":"5533a9c8bb97352dfddb862076432a6842f135c5df8aa24bba20d72a136c5271"} Oct 11 04:10:03 crc kubenswrapper[4798]: I1011 04:10:03.541750 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sv678" event={"ID":"3e260c01-682b-4b79-9ebe-c06c29750bfe","Type":"ContainerStarted","Data":"c84fdde8effdf00eb7b52d42d5842169520c46475a4447c0aa5fe68dc2f272f5"} Oct 11 04:10:03 crc kubenswrapper[4798]: I1011 04:10:03.543963 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"54e4a0f3-c43f-418e-b105-fd19db3ce615","Type":"ContainerStarted","Data":"55d4d3a6617e40340c0da0dbe84363c3903e839a32df9f4c6e9831ad2d509547"} Oct 11 04:10:03 crc kubenswrapper[4798]: I1011 04:10:03.558774 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" podStartSLOduration=7.923723709 podStartE2EDuration="18.558742643s" podCreationTimestamp="2025-10-11 04:09:45 +0000 UTC" firstStartedPulling="2025-10-11 04:09:50.974294608 +0000 UTC m=+886.310584294" lastFinishedPulling="2025-10-11 04:10:01.609313542 +0000 UTC m=+896.945603228" observedRunningTime="2025-10-11 04:10:03.553103498 +0000 UTC m=+898.889393184" watchObservedRunningTime="2025-10-11 04:10:03.558742643 +0000 UTC m=+898.895032329" Oct 11 04:10:10 crc kubenswrapper[4798]: I1011 04:10:10.868592 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" Oct 11 04:10:10 crc kubenswrapper[4798]: I1011 04:10:10.949299 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-cg6s5"] Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.641385 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5fcb3fcb-1839-493f-b485-0bb9ea061bc2","Type":"ContainerStarted","Data":"472145cc3bfbf8b1d2a0a733ed3e52c5a3add63dc0a017ab04740f0e61d91531"} Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.644301 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2d1fad00-1405-4149-96d6-7ef60d34c4f1","Type":"ContainerStarted","Data":"a593244851ee2bdbe6c9e26d50b98146947f230ad5ac9df68749fcac3c042f01"} Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.647293 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"6ff4b0aa-8ae0-4301-8bb1-6978cfa47207","Type":"ContainerStarted","Data":"1d277328fed024bbbb27bfe2c63f2502b59c27bfe433add673853cd48171b6ca"} Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.647715 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.649464 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"a3433fc1-acd1-4b1e-9df2-578848000615","Type":"ContainerStarted","Data":"65d257b548599d0f3b715d2f33f33e0114af362f547fe49eb4c64ff8b854c86c"} Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.649828 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.652435 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" event={"ID":"40f1f306-106b-4b2c-85bf-d97962d6db9a","Type":"ContainerStarted","Data":"3b3f161cab1b47e5b2ef2fbeb548567547c9ae3e381a012139d9e6cc76c4d3ab"} Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.652604 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.652703 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" podUID="40f1f306-106b-4b2c-85bf-d97962d6db9a" containerName="dnsmasq-dns" containerID="cri-o://3b3f161cab1b47e5b2ef2fbeb548567547c9ae3e381a012139d9e6cc76c4d3ab" gracePeriod=10 Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.656556 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"76d0b2fe-e594-4a66-8358-d3aed19300ea","Type":"ContainerStarted","Data":"9b3d8a63051d375cc0cc32099559f472e7970722c971340dfb8c671ac9cbfa02"} Oct 11 04:10:13 crc 
kubenswrapper[4798]: I1011 04:10:13.659054 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"54e4a0f3-c43f-418e-b105-fd19db3ce615","Type":"ContainerStarted","Data":"7611e33c8b567495fff90fd74c97bea58c109a8696f06bf4c40e0ce3573ffdee"} Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.661307 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tk4gr" event={"ID":"7f9da136-ab4f-4a2e-99e4-e546aa4220f9","Type":"ContainerStarted","Data":"42ba9827c78f9d94fde265464be3f231d6a63ae3bc122fbc919f1fb86aba9d2d"} Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.685572 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=14.69715664 podStartE2EDuration="24.685553947s" podCreationTimestamp="2025-10-11 04:09:49 +0000 UTC" firstStartedPulling="2025-10-11 04:10:02.013222508 +0000 UTC m=+897.349512194" lastFinishedPulling="2025-10-11 04:10:12.001619815 +0000 UTC m=+907.337909501" observedRunningTime="2025-10-11 04:10:13.683615381 +0000 UTC m=+909.019905057" watchObservedRunningTime="2025-10-11 04:10:13.685553947 +0000 UTC m=+909.021843633" Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.732751 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" podStartSLOduration=13.327089502 podStartE2EDuration="28.732715285s" podCreationTimestamp="2025-10-11 04:09:45 +0000 UTC" firstStartedPulling="2025-10-11 04:09:46.240645003 +0000 UTC m=+881.576934689" lastFinishedPulling="2025-10-11 04:10:01.646270786 +0000 UTC m=+896.982560472" observedRunningTime="2025-10-11 04:10:13.725147204 +0000 UTC m=+909.061436890" watchObservedRunningTime="2025-10-11 04:10:13.732715285 +0000 UTC m=+909.069004971" Oct 11 04:10:13 crc kubenswrapper[4798]: I1011 04:10:13.740012 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=11.747160143 podStartE2EDuration="22.739992339s" podCreationTimestamp="2025-10-11 04:09:51 +0000 UTC" firstStartedPulling="2025-10-11 04:10:02.254296441 +0000 UTC m=+897.590586127" lastFinishedPulling="2025-10-11 04:10:13.247128637 +0000 UTC m=+908.583418323" observedRunningTime="2025-10-11 04:10:13.738179636 +0000 UTC m=+909.074469322" watchObservedRunningTime="2025-10-11 04:10:13.739992339 +0000 UTC m=+909.076282025" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.119731 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.305981 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vl56m\" (UniqueName: \"kubernetes.io/projected/40f1f306-106b-4b2c-85bf-d97962d6db9a-kube-api-access-vl56m\") pod \"40f1f306-106b-4b2c-85bf-d97962d6db9a\" (UID: \"40f1f306-106b-4b2c-85bf-d97962d6db9a\") " Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.306093 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40f1f306-106b-4b2c-85bf-d97962d6db9a-config\") pod \"40f1f306-106b-4b2c-85bf-d97962d6db9a\" (UID: \"40f1f306-106b-4b2c-85bf-d97962d6db9a\") " Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.306141 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40f1f306-106b-4b2c-85bf-d97962d6db9a-dns-svc\") pod \"40f1f306-106b-4b2c-85bf-d97962d6db9a\" (UID: \"40f1f306-106b-4b2c-85bf-d97962d6db9a\") " Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.499219 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40f1f306-106b-4b2c-85bf-d97962d6db9a-kube-api-access-vl56m" (OuterVolumeSpecName: "kube-api-access-vl56m") pod "40f1f306-106b-4b2c-85bf-d97962d6db9a" (UID: "40f1f306-106b-4b2c-85bf-d97962d6db9a"). InnerVolumeSpecName "kube-api-access-vl56m". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.511012 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vl56m\" (UniqueName: \"kubernetes.io/projected/40f1f306-106b-4b2c-85bf-d97962d6db9a-kube-api-access-vl56m\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.598652 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40f1f306-106b-4b2c-85bf-d97962d6db9a-config" (OuterVolumeSpecName: "config") pod "40f1f306-106b-4b2c-85bf-d97962d6db9a" (UID: "40f1f306-106b-4b2c-85bf-d97962d6db9a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.613303 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40f1f306-106b-4b2c-85bf-d97962d6db9a-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.675108 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sv678" event={"ID":"3e260c01-682b-4b79-9ebe-c06c29750bfe","Type":"ContainerStarted","Data":"caa28a641ddc032f6c59987715581d81e8564c2dea39640b89cbd021276a6e37"} Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.675363 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-sv678" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.678207 4798 generic.go:334] "Generic (PLEG): container finished" podID="7f9da136-ab4f-4a2e-99e4-e546aa4220f9" containerID="42ba9827c78f9d94fde265464be3f231d6a63ae3bc122fbc919f1fb86aba9d2d" exitCode=0 Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.678549 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tk4gr" event={"ID":"7f9da136-ab4f-4a2e-99e4-e546aa4220f9","Type":"ContainerDied","Data":"42ba9827c78f9d94fde265464be3f231d6a63ae3bc122fbc919f1fb86aba9d2d"} Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.683536 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"eff19131-d13b-495f-94a3-097e030fd3cd","Type":"ContainerStarted","Data":"030275056fe2ac998fca1a9e12a7334f798986e02b40dd3ccf746ef1ee118864"} Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.688478 4798 generic.go:334] "Generic (PLEG): container finished" podID="40f1f306-106b-4b2c-85bf-d97962d6db9a" containerID="3b3f161cab1b47e5b2ef2fbeb548567547c9ae3e381a012139d9e6cc76c4d3ab" exitCode=0 Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.688702 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" event={"ID":"40f1f306-106b-4b2c-85bf-d97962d6db9a","Type":"ContainerDied","Data":"3b3f161cab1b47e5b2ef2fbeb548567547c9ae3e381a012139d9e6cc76c4d3ab"} Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.688847 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" event={"ID":"40f1f306-106b-4b2c-85bf-d97962d6db9a","Type":"ContainerDied","Data":"2ef3b98679dd57042132a79492753e0403ee66cb23b99d67b6b41c0208ebd63a"} Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.688994 4798 scope.go:117] "RemoveContainer" containerID="3b3f161cab1b47e5b2ef2fbeb548567547c9ae3e381a012139d9e6cc76c4d3ab" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.688690 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-cg6s5" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.713104 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-sv678" podStartSLOduration=9.693247061 podStartE2EDuration="19.713028898s" podCreationTimestamp="2025-10-11 04:09:55 +0000 UTC" firstStartedPulling="2025-10-11 04:10:02.559520937 +0000 UTC m=+897.895810623" lastFinishedPulling="2025-10-11 04:10:12.579302774 +0000 UTC m=+907.915592460" observedRunningTime="2025-10-11 04:10:14.697066937 +0000 UTC m=+910.033356633" watchObservedRunningTime="2025-10-11 04:10:14.713028898 +0000 UTC m=+910.049318614" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.718860 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40f1f306-106b-4b2c-85bf-d97962d6db9a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "40f1f306-106b-4b2c-85bf-d97962d6db9a" (UID: "40f1f306-106b-4b2c-85bf-d97962d6db9a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.788492 4798 scope.go:117] "RemoveContainer" containerID="3a3589820b69b86a23d3f859b412e6e69456a460fff096a87f4593d7ab4a1d61" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.820191 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40f1f306-106b-4b2c-85bf-d97962d6db9a-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.843833 4798 scope.go:117] "RemoveContainer" containerID="3b3f161cab1b47e5b2ef2fbeb548567547c9ae3e381a012139d9e6cc76c4d3ab" Oct 11 04:10:14 crc kubenswrapper[4798]: E1011 04:10:14.845103 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b3f161cab1b47e5b2ef2fbeb548567547c9ae3e381a012139d9e6cc76c4d3ab\": container with ID starting with 3b3f161cab1b47e5b2ef2fbeb548567547c9ae3e381a012139d9e6cc76c4d3ab not found: ID does not exist" containerID="3b3f161cab1b47e5b2ef2fbeb548567547c9ae3e381a012139d9e6cc76c4d3ab" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.845182 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b3f161cab1b47e5b2ef2fbeb548567547c9ae3e381a012139d9e6cc76c4d3ab"} err="failed to get container status \"3b3f161cab1b47e5b2ef2fbeb548567547c9ae3e381a012139d9e6cc76c4d3ab\": rpc error: code = NotFound desc = could not find container \"3b3f161cab1b47e5b2ef2fbeb548567547c9ae3e381a012139d9e6cc76c4d3ab\": container with ID starting with 3b3f161cab1b47e5b2ef2fbeb548567547c9ae3e381a012139d9e6cc76c4d3ab not found: ID does not exist" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.845255 4798 scope.go:117] "RemoveContainer" containerID="3a3589820b69b86a23d3f859b412e6e69456a460fff096a87f4593d7ab4a1d61" Oct 11 04:10:14 crc kubenswrapper[4798]: E1011 04:10:14.845762 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a3589820b69b86a23d3f859b412e6e69456a460fff096a87f4593d7ab4a1d61\": container with ID starting with 3a3589820b69b86a23d3f859b412e6e69456a460fff096a87f4593d7ab4a1d61 not found: ID does not exist" containerID="3a3589820b69b86a23d3f859b412e6e69456a460fff096a87f4593d7ab4a1d61" Oct 11 04:10:14 crc kubenswrapper[4798]: I1011 04:10:14.845815 4798 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"3a3589820b69b86a23d3f859b412e6e69456a460fff096a87f4593d7ab4a1d61"} err="failed to get container status \"3a3589820b69b86a23d3f859b412e6e69456a460fff096a87f4593d7ab4a1d61\": rpc error: code = NotFound desc = could not find container \"3a3589820b69b86a23d3f859b412e6e69456a460fff096a87f4593d7ab4a1d61\": container with ID starting with 3a3589820b69b86a23d3f859b412e6e69456a460fff096a87f4593d7ab4a1d61 not found: ID does not exist" Oct 11 04:10:15 crc kubenswrapper[4798]: I1011 04:10:15.030929 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-cg6s5"] Oct 11 04:10:15 crc kubenswrapper[4798]: I1011 04:10:15.037859 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-cg6s5"] Oct 11 04:10:15 crc kubenswrapper[4798]: I1011 04:10:15.438706 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40f1f306-106b-4b2c-85bf-d97962d6db9a" path="/var/lib/kubelet/pods/40f1f306-106b-4b2c-85bf-d97962d6db9a/volumes" Oct 11 04:10:15 crc kubenswrapper[4798]: I1011 04:10:15.704033 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f7b18bbb-aead-4356-99b3-f1ee253f86e8","Type":"ContainerStarted","Data":"b460d449f79e7f0722844afeca28dbfdb51a6a5f706ded8dadd5bf0066b6d520"} Oct 11 04:10:15 crc kubenswrapper[4798]: I1011 04:10:15.712242 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tk4gr" event={"ID":"7f9da136-ab4f-4a2e-99e4-e546aa4220f9","Type":"ContainerStarted","Data":"ca83a764689191441e76e637eef36b0067f3175cef70567b3f7acb1781b05aad"} Oct 11 04:10:15 crc kubenswrapper[4798]: I1011 04:10:15.712285 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:10:15 crc kubenswrapper[4798]: I1011 04:10:15.712298 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tk4gr" event={"ID":"7f9da136-ab4f-4a2e-99e4-e546aa4220f9","Type":"ContainerStarted","Data":"596fb38151701da43a8495edca463800dec07d5828024d37293d473e339533ec"} Oct 11 04:10:15 crc kubenswrapper[4798]: I1011 04:10:15.712541 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-tk4gr" Oct 11 04:10:15 crc kubenswrapper[4798]: I1011 04:10:15.782417 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-tk4gr" podStartSLOduration=11.439662598 podStartE2EDuration="20.782374791s" podCreationTimestamp="2025-10-11 04:09:55 +0000 UTC" firstStartedPulling="2025-10-11 04:10:02.988639074 +0000 UTC m=+898.324928760" lastFinishedPulling="2025-10-11 04:10:12.331351267 +0000 UTC m=+907.667640953" observedRunningTime="2025-10-11 04:10:15.774565094 +0000 UTC m=+911.110854780" watchObservedRunningTime="2025-10-11 04:10:15.782374791 +0000 UTC m=+911.118664487" Oct 11 04:10:17 crc kubenswrapper[4798]: I1011 04:10:17.759354 4798 generic.go:334] "Generic (PLEG): container finished" podID="54e4a0f3-c43f-418e-b105-fd19db3ce615" containerID="7611e33c8b567495fff90fd74c97bea58c109a8696f06bf4c40e0ce3573ffdee" exitCode=0 Oct 11 04:10:17 crc kubenswrapper[4798]: I1011 04:10:17.759434 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"54e4a0f3-c43f-418e-b105-fd19db3ce615","Type":"ContainerDied","Data":"7611e33c8b567495fff90fd74c97bea58c109a8696f06bf4c40e0ce3573ffdee"} Oct 11 04:10:17 crc kubenswrapper[4798]: I1011 
04:10:17.766892 4798 generic.go:334] "Generic (PLEG): container finished" podID="2d1fad00-1405-4149-96d6-7ef60d34c4f1" containerID="a593244851ee2bdbe6c9e26d50b98146947f230ad5ac9df68749fcac3c042f01" exitCode=0 Oct 11 04:10:17 crc kubenswrapper[4798]: I1011 04:10:17.766994 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2d1fad00-1405-4149-96d6-7ef60d34c4f1","Type":"ContainerDied","Data":"a593244851ee2bdbe6c9e26d50b98146947f230ad5ac9df68749fcac3c042f01"} Oct 11 04:10:17 crc kubenswrapper[4798]: I1011 04:10:17.775274 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"76d0b2fe-e594-4a66-8358-d3aed19300ea","Type":"ContainerStarted","Data":"648111fe86da552e3c944d47f40a2d7c2d9b5db907a1a23312c75b0d1ca000ff"} Oct 11 04:10:17 crc kubenswrapper[4798]: I1011 04:10:17.820707 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=5.7050744479999995 podStartE2EDuration="20.820690636s" podCreationTimestamp="2025-10-11 04:09:57 +0000 UTC" firstStartedPulling="2025-10-11 04:10:02.358742228 +0000 UTC m=+897.695031914" lastFinishedPulling="2025-10-11 04:10:17.474358416 +0000 UTC m=+912.810648102" observedRunningTime="2025-10-11 04:10:17.816787832 +0000 UTC m=+913.153077518" watchObservedRunningTime="2025-10-11 04:10:17.820690636 +0000 UTC m=+913.156980322" Oct 11 04:10:18 crc kubenswrapper[4798]: I1011 04:10:18.459935 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Oct 11 04:10:18 crc kubenswrapper[4798]: I1011 04:10:18.789244 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"54e4a0f3-c43f-418e-b105-fd19db3ce615","Type":"ContainerStarted","Data":"cc244ae8bcfd74468eab03a0e3fea984d26670db765c54f4343a6130759d8ac0"} Oct 11 04:10:18 crc kubenswrapper[4798]: I1011 04:10:18.793909 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5fcb3fcb-1839-493f-b485-0bb9ea061bc2","Type":"ContainerStarted","Data":"ff602ea0f798db073a4f395e16fb8502e052b80cd8178eef1a8f7225603b39b1"} Oct 11 04:10:18 crc kubenswrapper[4798]: I1011 04:10:18.798264 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2d1fad00-1405-4149-96d6-7ef60d34c4f1","Type":"ContainerStarted","Data":"502c842227c08d068a3b316ff85626a398ba5a284684bebd06fb7459ccf7034d"} Oct 11 04:10:18 crc kubenswrapper[4798]: I1011 04:10:18.827266 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=23.468612794 podStartE2EDuration="32.827249327s" podCreationTimestamp="2025-10-11 04:09:46 +0000 UTC" firstStartedPulling="2025-10-11 04:10:02.642915531 +0000 UTC m=+897.979205217" lastFinishedPulling="2025-10-11 04:10:12.001552074 +0000 UTC m=+907.337841750" observedRunningTime="2025-10-11 04:10:18.823261582 +0000 UTC m=+914.159551288" watchObservedRunningTime="2025-10-11 04:10:18.827249327 +0000 UTC m=+914.163539013" Oct 11 04:10:18 crc kubenswrapper[4798]: I1011 04:10:18.881852 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=20.09374162 podStartE2EDuration="30.881824902s" podCreationTimestamp="2025-10-11 04:09:48 +0000 UTC" firstStartedPulling="2025-10-11 04:10:02.267467965 +0000 UTC m=+897.603757651" lastFinishedPulling="2025-10-11 
04:10:13.055551237 +0000 UTC m=+908.391840933" observedRunningTime="2025-10-11 04:10:18.878303987 +0000 UTC m=+914.214593693" watchObservedRunningTime="2025-10-11 04:10:18.881824902 +0000 UTC m=+914.218114588" Oct 11 04:10:18 crc kubenswrapper[4798]: I1011 04:10:18.886291 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=9.130165532 podStartE2EDuration="23.886257037s" podCreationTimestamp="2025-10-11 04:09:55 +0000 UTC" firstStartedPulling="2025-10-11 04:10:02.705550567 +0000 UTC m=+898.041840253" lastFinishedPulling="2025-10-11 04:10:17.461642072 +0000 UTC m=+912.797931758" observedRunningTime="2025-10-11 04:10:18.850360669 +0000 UTC m=+914.186650385" watchObservedRunningTime="2025-10-11 04:10:18.886257037 +0000 UTC m=+914.222546723" Oct 11 04:10:19 crc kubenswrapper[4798]: I1011 04:10:19.459883 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Oct 11 04:10:19 crc kubenswrapper[4798]: I1011 04:10:19.512647 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Oct 11 04:10:19 crc kubenswrapper[4798]: I1011 04:10:19.756961 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Oct 11 04:10:19 crc kubenswrapper[4798]: I1011 04:10:19.757043 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Oct 11 04:10:19 crc kubenswrapper[4798]: I1011 04:10:19.871317 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.160350 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-krbz8"] Oct 11 04:10:20 crc kubenswrapper[4798]: E1011 04:10:20.161069 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40f1f306-106b-4b2c-85bf-d97962d6db9a" containerName="init" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.161088 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="40f1f306-106b-4b2c-85bf-d97962d6db9a" containerName="init" Oct 11 04:10:20 crc kubenswrapper[4798]: E1011 04:10:20.161103 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40f1f306-106b-4b2c-85bf-d97962d6db9a" containerName="dnsmasq-dns" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.161110 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="40f1f306-106b-4b2c-85bf-d97962d6db9a" containerName="dnsmasq-dns" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.161263 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="40f1f306-106b-4b2c-85bf-d97962d6db9a" containerName="dnsmasq-dns" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.162247 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.164641 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.188147 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-krbz8"] Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.206710 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-m5tjs"] Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.207742 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.215586 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.219734 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.234914 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-m5tjs"] Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.329774 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18b39a28-1973-440d-8294-c9aa434cd56d-config\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.329851 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18b39a28-1973-440d-8294-c9aa434cd56d-combined-ca-bundle\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.329954 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/18b39a28-1973-440d-8294-c9aa434cd56d-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.330027 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-config\") pod \"dnsmasq-dns-6bc7876d45-krbz8\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.330060 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/18b39a28-1973-440d-8294-c9aa434cd56d-ovs-rundir\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.330098 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-dns-svc\") pod 
\"dnsmasq-dns-6bc7876d45-krbz8\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.330114 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwxbj\" (UniqueName: \"kubernetes.io/projected/18b39a28-1973-440d-8294-c9aa434cd56d-kube-api-access-qwxbj\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.330136 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-krbz8\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.330154 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/18b39a28-1973-440d-8294-c9aa434cd56d-ovn-rundir\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.330207 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f78t\" (UniqueName: \"kubernetes.io/projected/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-kube-api-access-2f78t\") pod \"dnsmasq-dns-6bc7876d45-krbz8\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.431570 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18b39a28-1973-440d-8294-c9aa434cd56d-config\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.431620 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18b39a28-1973-440d-8294-c9aa434cd56d-combined-ca-bundle\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.431646 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/18b39a28-1973-440d-8294-c9aa434cd56d-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.431687 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-config\") pod \"dnsmasq-dns-6bc7876d45-krbz8\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.431709 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: 
\"kubernetes.io/host-path/18b39a28-1973-440d-8294-c9aa434cd56d-ovs-rundir\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.431737 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-krbz8\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.431759 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwxbj\" (UniqueName: \"kubernetes.io/projected/18b39a28-1973-440d-8294-c9aa434cd56d-kube-api-access-qwxbj\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.431780 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-krbz8\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.431797 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/18b39a28-1973-440d-8294-c9aa434cd56d-ovn-rundir\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.431846 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f78t\" (UniqueName: \"kubernetes.io/projected/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-kube-api-access-2f78t\") pod \"dnsmasq-dns-6bc7876d45-krbz8\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.433003 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18b39a28-1973-440d-8294-c9aa434cd56d-config\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.434552 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-krbz8\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.434738 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/18b39a28-1973-440d-8294-c9aa434cd56d-ovs-rundir\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.434979 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-config\") pod \"dnsmasq-dns-6bc7876d45-krbz8\" (UID: 
\"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.435078 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/18b39a28-1973-440d-8294-c9aa434cd56d-ovn-rundir\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.435640 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-krbz8\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.441189 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18b39a28-1973-440d-8294-c9aa434cd56d-combined-ca-bundle\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.441563 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/18b39a28-1973-440d-8294-c9aa434cd56d-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.455040 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f78t\" (UniqueName: \"kubernetes.io/projected/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-kube-api-access-2f78t\") pod \"dnsmasq-dns-6bc7876d45-krbz8\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.468079 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwxbj\" (UniqueName: \"kubernetes.io/projected/18b39a28-1973-440d-8294-c9aa434cd56d-kube-api-access-qwxbj\") pod \"ovn-controller-metrics-m5tjs\" (UID: \"18b39a28-1973-440d-8294-c9aa434cd56d\") " pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.481069 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.536464 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-m5tjs" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.642802 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.743447 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.744696 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-krbz8"] Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.791516 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-lhhsb"] Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.792813 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.796364 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.819253 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-lhhsb"] Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.854749 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.905817 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.952388 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.952469 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-config\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.952573 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-dns-svc\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.954735 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6bss\" (UniqueName: \"kubernetes.io/projected/8d972df7-8eff-4cc6-92a1-799f816bc198-kube-api-access-w6bss\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:20 crc kubenswrapper[4798]: I1011 04:10:20.954816 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: 
\"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.056917 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6bss\" (UniqueName: \"kubernetes.io/projected/8d972df7-8eff-4cc6-92a1-799f816bc198-kube-api-access-w6bss\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.056982 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.057019 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.057049 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-config\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.057099 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-dns-svc\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.058846 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.059068 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.059199 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-dns-svc\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.061491 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-config\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:21 crc kubenswrapper[4798]: 
I1011 04:10:21.082584 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6bss\" (UniqueName: \"kubernetes.io/projected/8d972df7-8eff-4cc6-92a1-799f816bc198-kube-api-access-w6bss\") pod \"dnsmasq-dns-8554648995-lhhsb\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.135837 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.172237 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-krbz8"] Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.292646 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.298597 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.302552 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.302933 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.302987 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-hks7k" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.303267 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.305441 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.361617 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-m5tjs"] Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.363124 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-config\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.363179 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-scripts\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.363234 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.363258 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.363283 4798 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rztkg\" (UniqueName: \"kubernetes.io/projected/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-kube-api-access-rztkg\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.363315 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.363339 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.466025 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.469557 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rztkg\" (UniqueName: \"kubernetes.io/projected/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-kube-api-access-rztkg\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.469613 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.469637 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.469754 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-config\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.469819 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-scripts\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.469896 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-ovn-rundir\") pod \"ovn-northd-0\" (UID: 
\"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.470307 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.472695 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-config\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.473984 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-scripts\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.474289 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.474924 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.475940 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.489923 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rztkg\" (UniqueName: \"kubernetes.io/projected/2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01-kube-api-access-rztkg\") pod \"ovn-northd-0\" (UID: \"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01\") " pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.648717 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.692021 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-lhhsb"] Oct 11 04:10:21 crc kubenswrapper[4798]: W1011 04:10:21.699144 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d972df7_8eff_4cc6_92a1_799f816bc198.slice/crio-0d4fc09c63066d217198448312807a70d2c12164108b7e789978564067be8c26 WatchSource:0}: Error finding container 0d4fc09c63066d217198448312807a70d2c12164108b7e789978564067be8c26: Status 404 returned error can't find the container with id 0d4fc09c63066d217198448312807a70d2c12164108b7e789978564067be8c26 Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.870667 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.912735 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-m5tjs" event={"ID":"18b39a28-1973-440d-8294-c9aa434cd56d","Type":"ContainerStarted","Data":"3d33981c5f56fef3b776558ab6d6815c59bd311d092ff31de1a82d72d7b081c8"} Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.913154 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-m5tjs" event={"ID":"18b39a28-1973-440d-8294-c9aa434cd56d","Type":"ContainerStarted","Data":"243ee60f9970aa9cdd74d0613d5e4a1f7709b8b654b3359c5f0b17d75f1526cb"} Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.933939 4798 generic.go:334] "Generic (PLEG): container finished" podID="a92a0a1d-f1be-43f6-b4eb-21fdb21382e5" containerID="3b7918d2f6333d1abe557bee879f25131a5924ae7ea1c71c5234a255f60ab422" exitCode=0 Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.934229 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" event={"ID":"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5","Type":"ContainerDied","Data":"3b7918d2f6333d1abe557bee879f25131a5924ae7ea1c71c5234a255f60ab422"} Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.934360 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" event={"ID":"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5","Type":"ContainerStarted","Data":"f1a5cde096855aedf953af8be248ffc46768f0fce0adfa082302df3755c5fc95"} Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.938722 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-lhhsb" event={"ID":"8d972df7-8eff-4cc6-92a1-799f816bc198","Type":"ContainerStarted","Data":"0d4fc09c63066d217198448312807a70d2c12164108b7e789978564067be8c26"} Oct 11 04:10:21 crc kubenswrapper[4798]: I1011 04:10:21.954616 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-m5tjs" podStartSLOduration=1.954595564 podStartE2EDuration="1.954595564s" podCreationTimestamp="2025-10-11 04:10:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:10:21.954102623 +0000 UTC m=+917.290392319" watchObservedRunningTime="2025-10-11 04:10:21.954595564 +0000 UTC m=+917.290885250" Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.165734 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.321663 4798 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.489940 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-config\") pod \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.490047 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-ovsdbserver-sb\") pod \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.490265 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2f78t\" (UniqueName: \"kubernetes.io/projected/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-kube-api-access-2f78t\") pod \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.490489 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-dns-svc\") pod \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\" (UID: \"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5\") " Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.496076 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-kube-api-access-2f78t" (OuterVolumeSpecName: "kube-api-access-2f78t") pod "a92a0a1d-f1be-43f6-b4eb-21fdb21382e5" (UID: "a92a0a1d-f1be-43f6-b4eb-21fdb21382e5"). InnerVolumeSpecName "kube-api-access-2f78t". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.515810 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a92a0a1d-f1be-43f6-b4eb-21fdb21382e5" (UID: "a92a0a1d-f1be-43f6-b4eb-21fdb21382e5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.516505 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a92a0a1d-f1be-43f6-b4eb-21fdb21382e5" (UID: "a92a0a1d-f1be-43f6-b4eb-21fdb21382e5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.524547 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-config" (OuterVolumeSpecName: "config") pod "a92a0a1d-f1be-43f6-b4eb-21fdb21382e5" (UID: "a92a0a1d-f1be-43f6-b4eb-21fdb21382e5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.592203 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.592233 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.592246 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2f78t\" (UniqueName: \"kubernetes.io/projected/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-kube-api-access-2f78t\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.592257 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.948541 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.948745 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-krbz8" event={"ID":"a92a0a1d-f1be-43f6-b4eb-21fdb21382e5","Type":"ContainerDied","Data":"f1a5cde096855aedf953af8be248ffc46768f0fce0adfa082302df3755c5fc95"} Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.949575 4798 scope.go:117] "RemoveContainer" containerID="3b7918d2f6333d1abe557bee879f25131a5924ae7ea1c71c5234a255f60ab422" Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.950305 4798 generic.go:334] "Generic (PLEG): container finished" podID="8d972df7-8eff-4cc6-92a1-799f816bc198" containerID="48cb99dc4a4d9bef61b230298e4a87710b18b1ba49e0ab215ec5c1c6bc4f7963" exitCode=0 Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.950714 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-lhhsb" event={"ID":"8d972df7-8eff-4cc6-92a1-799f816bc198","Type":"ContainerDied","Data":"48cb99dc4a4d9bef61b230298e4a87710b18b1ba49e0ab215ec5c1c6bc4f7963"} Oct 11 04:10:22 crc kubenswrapper[4798]: I1011 04:10:22.954743 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01","Type":"ContainerStarted","Data":"12f72c11a9bac380e566fd34cd8bfddd05654c96664642878894ccff3868dc6f"} Oct 11 04:10:23 crc kubenswrapper[4798]: I1011 04:10:23.105832 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-krbz8"] Oct 11 04:10:23 crc kubenswrapper[4798]: I1011 04:10:23.112731 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-krbz8"] Oct 11 04:10:23 crc kubenswrapper[4798]: I1011 04:10:23.439612 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a92a0a1d-f1be-43f6-b4eb-21fdb21382e5" path="/var/lib/kubelet/pods/a92a0a1d-f1be-43f6-b4eb-21fdb21382e5/volumes" Oct 11 04:10:23 crc kubenswrapper[4798]: I1011 04:10:23.974318 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-lhhsb" event={"ID":"8d972df7-8eff-4cc6-92a1-799f816bc198","Type":"ContainerStarted","Data":"66e5712c05c3d0e3528cefaa70d49785b5d3d6f65c1e53b617ed5fffc130df03"} 
Oct 11 04:10:23 crc kubenswrapper[4798]: I1011 04:10:23.975584 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:10:24 crc kubenswrapper[4798]: I1011 04:10:24.004757 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-lhhsb" podStartSLOduration=4.004723341 podStartE2EDuration="4.004723341s" podCreationTimestamp="2025-10-11 04:10:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:10:24.001528515 +0000 UTC m=+919.337818211" watchObservedRunningTime="2025-10-11 04:10:24.004723341 +0000 UTC m=+919.341013037" Oct 11 04:10:27 crc kubenswrapper[4798]: I1011 04:10:27.138420 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:10:27 crc kubenswrapper[4798]: I1011 04:10:27.139557 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:10:27 crc kubenswrapper[4798]: I1011 04:10:27.139621 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 04:10:27 crc kubenswrapper[4798]: I1011 04:10:27.140577 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"54c6a0004bcf04af0eb4f7beb3728e8dd11d3c9b035976cf54104ce60f0d2629"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 04:10:27 crc kubenswrapper[4798]: I1011 04:10:27.140676 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://54c6a0004bcf04af0eb4f7beb3728e8dd11d3c9b035976cf54104ce60f0d2629" gracePeriod=600 Oct 11 04:10:28 crc kubenswrapper[4798]: I1011 04:10:28.032077 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01","Type":"ContainerStarted","Data":"b780f9e66c6a59c3cb73a4a473843ea629eb607ea401ed48424e108a4c8ebbe3"} Oct 11 04:10:28 crc kubenswrapper[4798]: I1011 04:10:28.327845 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Oct 11 04:10:28 crc kubenswrapper[4798]: I1011 04:10:28.327911 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Oct 11 04:10:28 crc kubenswrapper[4798]: I1011 04:10:28.399577 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Oct 11 04:10:29 crc kubenswrapper[4798]: I1011 04:10:29.045052 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" 
containerID="54c6a0004bcf04af0eb4f7beb3728e8dd11d3c9b035976cf54104ce60f0d2629" exitCode=0 Oct 11 04:10:29 crc kubenswrapper[4798]: I1011 04:10:29.045104 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"54c6a0004bcf04af0eb4f7beb3728e8dd11d3c9b035976cf54104ce60f0d2629"} Oct 11 04:10:29 crc kubenswrapper[4798]: I1011 04:10:29.045842 4798 scope.go:117] "RemoveContainer" containerID="c3b0e40f46ff6d5e205dd95a80f3265b5b1e7509ae9d7aa46df649af185d464c" Oct 11 04:10:29 crc kubenswrapper[4798]: I1011 04:10:29.102673 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Oct 11 04:10:29 crc kubenswrapper[4798]: I1011 04:10:29.917686 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-l79jb"] Oct 11 04:10:29 crc kubenswrapper[4798]: E1011 04:10:29.918381 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a92a0a1d-f1be-43f6-b4eb-21fdb21382e5" containerName="init" Oct 11 04:10:29 crc kubenswrapper[4798]: I1011 04:10:29.918427 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="a92a0a1d-f1be-43f6-b4eb-21fdb21382e5" containerName="init" Oct 11 04:10:29 crc kubenswrapper[4798]: I1011 04:10:29.918659 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="a92a0a1d-f1be-43f6-b4eb-21fdb21382e5" containerName="init" Oct 11 04:10:29 crc kubenswrapper[4798]: I1011 04:10:29.919599 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-l79jb" Oct 11 04:10:29 crc kubenswrapper[4798]: I1011 04:10:29.927880 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-l79jb"] Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.018829 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-2lxjs"] Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.028267 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-2lxjs"] Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.028475 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-2lxjs"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.047493 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sp7m\" (UniqueName: \"kubernetes.io/projected/facccfd6-9383-4336-be2d-1f56b6f11d25-kube-api-access-7sp7m\") pod \"keystone-db-create-l79jb\" (UID: \"facccfd6-9383-4336-be2d-1f56b6f11d25\") " pod="openstack/keystone-db-create-l79jb"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.150486 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p9kj\" (UniqueName: \"kubernetes.io/projected/92a46d10-49a4-46d5-8344-5d3d910eb5ca-kube-api-access-5p9kj\") pod \"placement-db-create-2lxjs\" (UID: \"92a46d10-49a4-46d5-8344-5d3d910eb5ca\") " pod="openstack/placement-db-create-2lxjs"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.150692 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sp7m\" (UniqueName: \"kubernetes.io/projected/facccfd6-9383-4336-be2d-1f56b6f11d25-kube-api-access-7sp7m\") pod \"keystone-db-create-l79jb\" (UID: \"facccfd6-9383-4336-be2d-1f56b6f11d25\") " pod="openstack/keystone-db-create-l79jb"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.175817 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sp7m\" (UniqueName: \"kubernetes.io/projected/facccfd6-9383-4336-be2d-1f56b6f11d25-kube-api-access-7sp7m\") pod \"keystone-db-create-l79jb\" (UID: \"facccfd6-9383-4336-be2d-1f56b6f11d25\") " pod="openstack/keystone-db-create-l79jb"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.253213 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5p9kj\" (UniqueName: \"kubernetes.io/projected/92a46d10-49a4-46d5-8344-5d3d910eb5ca-kube-api-access-5p9kj\") pod \"placement-db-create-2lxjs\" (UID: \"92a46d10-49a4-46d5-8344-5d3d910eb5ca\") " pod="openstack/placement-db-create-2lxjs"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.272370 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-l79jb"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.286479 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5p9kj\" (UniqueName: \"kubernetes.io/projected/92a46d10-49a4-46d5-8344-5d3d910eb5ca-kube-api-access-5p9kj\") pod \"placement-db-create-2lxjs\" (UID: \"92a46d10-49a4-46d5-8344-5d3d910eb5ca\") " pod="openstack/placement-db-create-2lxjs"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.348901 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-2lxjs"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.377167 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-h2g26"]
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.378720 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-h2g26"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.412042 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-h2g26"]
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.462587 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dx7lh\" (UniqueName: \"kubernetes.io/projected/4264e4c3-e92c-4335-948f-565d22a7ba21-kube-api-access-dx7lh\") pod \"glance-db-create-h2g26\" (UID: \"4264e4c3-e92c-4335-948f-565d22a7ba21\") " pod="openstack/glance-db-create-h2g26"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.568417 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dx7lh\" (UniqueName: \"kubernetes.io/projected/4264e4c3-e92c-4335-948f-565d22a7ba21-kube-api-access-dx7lh\") pod \"glance-db-create-h2g26\" (UID: \"4264e4c3-e92c-4335-948f-565d22a7ba21\") " pod="openstack/glance-db-create-h2g26"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.589571 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dx7lh\" (UniqueName: \"kubernetes.io/projected/4264e4c3-e92c-4335-948f-565d22a7ba21-kube-api-access-dx7lh\") pod \"glance-db-create-h2g26\" (UID: \"4264e4c3-e92c-4335-948f-565d22a7ba21\") " pod="openstack/glance-db-create-h2g26"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.698049 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-l79jb"]
Oct 11 04:10:30 crc kubenswrapper[4798]: W1011 04:10:30.712275 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfacccfd6_9383_4336_be2d_1f56b6f11d25.slice/crio-3868c0860bea3e1e493acd863eab312d449c9b4885cd95cfba330648bb4b15d6 WatchSource:0}: Error finding container 3868c0860bea3e1e493acd863eab312d449c9b4885cd95cfba330648bb4b15d6: Status 404 returned error can't find the container with id 3868c0860bea3e1e493acd863eab312d449c9b4885cd95cfba330648bb4b15d6
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.818459 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-h2g26"
Oct 11 04:10:30 crc kubenswrapper[4798]: I1011 04:10:30.836074 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-2lxjs"]
Oct 11 04:10:30 crc kubenswrapper[4798]: W1011 04:10:30.853983 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod92a46d10_49a4_46d5_8344_5d3d910eb5ca.slice/crio-0569636b9ebe84dbfeaacaec831abc303fcccff93bf9ced72118ecf78e61cbcc WatchSource:0}: Error finding container 0569636b9ebe84dbfeaacaec831abc303fcccff93bf9ced72118ecf78e61cbcc: Status 404 returned error can't find the container with id 0569636b9ebe84dbfeaacaec831abc303fcccff93bf9ced72118ecf78e61cbcc
Oct 11 04:10:31 crc kubenswrapper[4798]: I1011 04:10:31.064777 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-2lxjs" event={"ID":"92a46d10-49a4-46d5-8344-5d3d910eb5ca","Type":"ContainerStarted","Data":"0569636b9ebe84dbfeaacaec831abc303fcccff93bf9ced72118ecf78e61cbcc"}
Oct 11 04:10:31 crc kubenswrapper[4798]: I1011 04:10:31.066895 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-l79jb" event={"ID":"facccfd6-9383-4336-be2d-1f56b6f11d25","Type":"ContainerStarted","Data":"3868c0860bea3e1e493acd863eab312d449c9b4885cd95cfba330648bb4b15d6"}
Oct 11 04:10:31 crc kubenswrapper[4798]: I1011 04:10:31.108103 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-h2g26"]
Oct 11 04:10:31 crc kubenswrapper[4798]: W1011 04:10:31.109744 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4264e4c3_e92c_4335_948f_565d22a7ba21.slice/crio-b2c1a34a8833a7b3c0c49e998ec7b0222457f2e62d7b07c959dc1732fbf10f56 WatchSource:0}: Error finding container b2c1a34a8833a7b3c0c49e998ec7b0222457f2e62d7b07c959dc1732fbf10f56: Status 404 returned error can't find the container with id b2c1a34a8833a7b3c0c49e998ec7b0222457f2e62d7b07c959dc1732fbf10f56
Oct 11 04:10:31 crc kubenswrapper[4798]: I1011 04:10:31.137559 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-lhhsb"
Oct 11 04:10:31 crc kubenswrapper[4798]: I1011 04:10:31.232901 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wcmbz"]
Oct 11 04:10:31 crc kubenswrapper[4798]: I1011 04:10:31.233228 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" podUID="ef82cf1a-27d8-4ce1-82ca-5143fb76af9a" containerName="dnsmasq-dns" containerID="cri-o://de0f0f6e306be045068b75d455a1a4e29ab065a7e48d0a8af6d89c0145f76353" gracePeriod=10
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.106048 4798 generic.go:334] "Generic (PLEG): container finished" podID="ef82cf1a-27d8-4ce1-82ca-5143fb76af9a" containerID="de0f0f6e306be045068b75d455a1a4e29ab065a7e48d0a8af6d89c0145f76353" exitCode=0
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.106138 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" event={"ID":"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a","Type":"ContainerDied","Data":"de0f0f6e306be045068b75d455a1a4e29ab065a7e48d0a8af6d89c0145f76353"}
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.111102 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h2g26" event={"ID":"4264e4c3-e92c-4335-948f-565d22a7ba21","Type":"ContainerStarted","Data":"cc6b6ff6d9199dcfcff6d49580de3753d5fd1a5c6ff619f25bda77015544b2d2"}
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.111240 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h2g26" event={"ID":"4264e4c3-e92c-4335-948f-565d22a7ba21","Type":"ContainerStarted","Data":"b2c1a34a8833a7b3c0c49e998ec7b0222457f2e62d7b07c959dc1732fbf10f56"}
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.112515 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-2lxjs" event={"ID":"92a46d10-49a4-46d5-8344-5d3d910eb5ca","Type":"ContainerStarted","Data":"36ea9392bc0f636943ef337d2bfca61b4e8a9c22b9ee8ef3e09b321121467d41"}
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.114063 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-l79jb" event={"ID":"facccfd6-9383-4336-be2d-1f56b6f11d25","Type":"ContainerStarted","Data":"4ceedb7600d54728d2a4a22a17156a98f8281a97f5bebe41efa9f8c115b486be"}
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.135297 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-l79jb" podStartSLOduration=3.135269797 podStartE2EDuration="3.135269797s" podCreationTimestamp="2025-10-11 04:10:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:10:32.128238929 +0000 UTC m=+927.464528615" watchObservedRunningTime="2025-10-11 04:10:32.135269797 +0000 UTC m=+927.471559483"
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.575763 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz"
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.713243 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htmsh\" (UniqueName: \"kubernetes.io/projected/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-kube-api-access-htmsh\") pod \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\" (UID: \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\") "
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.713837 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-config\") pod \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\" (UID: \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\") "
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.713956 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-dns-svc\") pod \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\" (UID: \"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a\") "
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.719703 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-kube-api-access-htmsh" (OuterVolumeSpecName: "kube-api-access-htmsh") pod "ef82cf1a-27d8-4ce1-82ca-5143fb76af9a" (UID: "ef82cf1a-27d8-4ce1-82ca-5143fb76af9a"). InnerVolumeSpecName "kube-api-access-htmsh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.751956 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-config" (OuterVolumeSpecName: "config") pod "ef82cf1a-27d8-4ce1-82ca-5143fb76af9a" (UID: "ef82cf1a-27d8-4ce1-82ca-5143fb76af9a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.752516 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ef82cf1a-27d8-4ce1-82ca-5143fb76af9a" (UID: "ef82cf1a-27d8-4ce1-82ca-5143fb76af9a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.815537 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-dns-svc\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.815581 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htmsh\" (UniqueName: \"kubernetes.io/projected/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-kube-api-access-htmsh\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:32 crc kubenswrapper[4798]: I1011 04:10:32.815600 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a-config\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.043843 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.109503 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="2d1fad00-1405-4149-96d6-7ef60d34c4f1" containerName="galera" probeResult="failure" output=<
Oct 11 04:10:33 crc kubenswrapper[4798]: wsrep_local_state_comment (Joined) differs from Synced
Oct 11 04:10:33 crc kubenswrapper[4798]: >
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.125658 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz" event={"ID":"ef82cf1a-27d8-4ce1-82ca-5143fb76af9a","Type":"ContainerDied","Data":"0d64bad6f40466eda84f07a86d4a705b6b13e44710789e4d585dcd8269fb286b"}
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.125916 4798 scope.go:117] "RemoveContainer" containerID="de0f0f6e306be045068b75d455a1a4e29ab065a7e48d0a8af6d89c0145f76353"
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.126075 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wcmbz"
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.128062 4798 generic.go:334] "Generic (PLEG): container finished" podID="4264e4c3-e92c-4335-948f-565d22a7ba21" containerID="cc6b6ff6d9199dcfcff6d49580de3753d5fd1a5c6ff619f25bda77015544b2d2" exitCode=0
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.128120 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h2g26" event={"ID":"4264e4c3-e92c-4335-948f-565d22a7ba21","Type":"ContainerDied","Data":"cc6b6ff6d9199dcfcff6d49580de3753d5fd1a5c6ff619f25bda77015544b2d2"}
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.130531 4798 generic.go:334] "Generic (PLEG): container finished" podID="92a46d10-49a4-46d5-8344-5d3d910eb5ca" containerID="36ea9392bc0f636943ef337d2bfca61b4e8a9c22b9ee8ef3e09b321121467d41" exitCode=0
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.130691 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-2lxjs" event={"ID":"92a46d10-49a4-46d5-8344-5d3d910eb5ca","Type":"ContainerDied","Data":"36ea9392bc0f636943ef337d2bfca61b4e8a9c22b9ee8ef3e09b321121467d41"}
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.136377 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01","Type":"ContainerStarted","Data":"f114f7d1b3eadec3ee0891a3e941b7ba98d69c88f2d4366ec2af8550aeed8220"}
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.136570 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.138431 4798 generic.go:334] "Generic (PLEG): container finished" podID="facccfd6-9383-4336-be2d-1f56b6f11d25" containerID="4ceedb7600d54728d2a4a22a17156a98f8281a97f5bebe41efa9f8c115b486be" exitCode=0
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.138500 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-l79jb" event={"ID":"facccfd6-9383-4336-be2d-1f56b6f11d25","Type":"ContainerDied","Data":"4ceedb7600d54728d2a4a22a17156a98f8281a97f5bebe41efa9f8c115b486be"}
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.142293 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"d3a7cc4feeb865c985e6b8c7245a1d16577ae082c25541425e42b8ef5e8c15f6"}
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.147969 4798 scope.go:117] "RemoveContainer" containerID="69416d927d550b02a94e02630f0db4c392c65866d74faaa40915b68db26abbdb"
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.200787 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=8.406099558 podStartE2EDuration="12.200771047s" podCreationTimestamp="2025-10-11 04:10:21 +0000 UTC" firstStartedPulling="2025-10-11 04:10:22.188222859 +0000 UTC m=+917.524512565" lastFinishedPulling="2025-10-11 04:10:25.982894348 +0000 UTC m=+921.319184054" observedRunningTime="2025-10-11 04:10:33.185139104 +0000 UTC m=+928.521428790" watchObservedRunningTime="2025-10-11 04:10:33.200771047 +0000 UTC m=+928.537060733"
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.260375 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wcmbz"]
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.281606 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wcmbz"]
Oct 11 04:10:33 crc kubenswrapper[4798]: I1011 04:10:33.432169 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef82cf1a-27d8-4ce1-82ca-5143fb76af9a" path="/var/lib/kubelet/pods/ef82cf1a-27d8-4ce1-82ca-5143fb76af9a/volumes"
Oct 11 04:10:34 crc kubenswrapper[4798]: I1011 04:10:34.607938 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-h2g26"
Oct 11 04:10:34 crc kubenswrapper[4798]: I1011 04:10:34.617479 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-l79jb"
Oct 11 04:10:34 crc kubenswrapper[4798]: I1011 04:10:34.629823 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-2lxjs"
Oct 11 04:10:34 crc kubenswrapper[4798]: I1011 04:10:34.750028 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dx7lh\" (UniqueName: \"kubernetes.io/projected/4264e4c3-e92c-4335-948f-565d22a7ba21-kube-api-access-dx7lh\") pod \"4264e4c3-e92c-4335-948f-565d22a7ba21\" (UID: \"4264e4c3-e92c-4335-948f-565d22a7ba21\") "
Oct 11 04:10:34 crc kubenswrapper[4798]: I1011 04:10:34.750190 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5p9kj\" (UniqueName: \"kubernetes.io/projected/92a46d10-49a4-46d5-8344-5d3d910eb5ca-kube-api-access-5p9kj\") pod \"92a46d10-49a4-46d5-8344-5d3d910eb5ca\" (UID: \"92a46d10-49a4-46d5-8344-5d3d910eb5ca\") "
Oct 11 04:10:34 crc kubenswrapper[4798]: I1011 04:10:34.750254 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7sp7m\" (UniqueName: \"kubernetes.io/projected/facccfd6-9383-4336-be2d-1f56b6f11d25-kube-api-access-7sp7m\") pod \"facccfd6-9383-4336-be2d-1f56b6f11d25\" (UID: \"facccfd6-9383-4336-be2d-1f56b6f11d25\") "
Oct 11 04:10:34 crc kubenswrapper[4798]: I1011 04:10:34.765530 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92a46d10-49a4-46d5-8344-5d3d910eb5ca-kube-api-access-5p9kj" (OuterVolumeSpecName: "kube-api-access-5p9kj") pod "92a46d10-49a4-46d5-8344-5d3d910eb5ca" (UID: "92a46d10-49a4-46d5-8344-5d3d910eb5ca"). InnerVolumeSpecName "kube-api-access-5p9kj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:10:34 crc kubenswrapper[4798]: I1011 04:10:34.767880 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4264e4c3-e92c-4335-948f-565d22a7ba21-kube-api-access-dx7lh" (OuterVolumeSpecName: "kube-api-access-dx7lh") pod "4264e4c3-e92c-4335-948f-565d22a7ba21" (UID: "4264e4c3-e92c-4335-948f-565d22a7ba21"). InnerVolumeSpecName "kube-api-access-dx7lh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:10:34 crc kubenswrapper[4798]: I1011 04:10:34.784057 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/facccfd6-9383-4336-be2d-1f56b6f11d25-kube-api-access-7sp7m" (OuterVolumeSpecName: "kube-api-access-7sp7m") pod "facccfd6-9383-4336-be2d-1f56b6f11d25" (UID: "facccfd6-9383-4336-be2d-1f56b6f11d25"). InnerVolumeSpecName "kube-api-access-7sp7m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:10:34 crc kubenswrapper[4798]: I1011 04:10:34.852383 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dx7lh\" (UniqueName: \"kubernetes.io/projected/4264e4c3-e92c-4335-948f-565d22a7ba21-kube-api-access-dx7lh\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:34 crc kubenswrapper[4798]: I1011 04:10:34.852434 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5p9kj\" (UniqueName: \"kubernetes.io/projected/92a46d10-49a4-46d5-8344-5d3d910eb5ca-kube-api-access-5p9kj\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:34 crc kubenswrapper[4798]: I1011 04:10:34.852444 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7sp7m\" (UniqueName: \"kubernetes.io/projected/facccfd6-9383-4336-be2d-1f56b6f11d25-kube-api-access-7sp7m\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:35 crc kubenswrapper[4798]: I1011 04:10:35.157981 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-l79jb" event={"ID":"facccfd6-9383-4336-be2d-1f56b6f11d25","Type":"ContainerDied","Data":"3868c0860bea3e1e493acd863eab312d449c9b4885cd95cfba330648bb4b15d6"}
Oct 11 04:10:35 crc kubenswrapper[4798]: I1011 04:10:35.158027 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3868c0860bea3e1e493acd863eab312d449c9b4885cd95cfba330648bb4b15d6"
Oct 11 04:10:35 crc kubenswrapper[4798]: I1011 04:10:35.158094 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-l79jb"
Oct 11 04:10:35 crc kubenswrapper[4798]: I1011 04:10:35.160227 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h2g26" event={"ID":"4264e4c3-e92c-4335-948f-565d22a7ba21","Type":"ContainerDied","Data":"b2c1a34a8833a7b3c0c49e998ec7b0222457f2e62d7b07c959dc1732fbf10f56"}
Oct 11 04:10:35 crc kubenswrapper[4798]: I1011 04:10:35.160262 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2c1a34a8833a7b3c0c49e998ec7b0222457f2e62d7b07c959dc1732fbf10f56"
Oct 11 04:10:35 crc kubenswrapper[4798]: I1011 04:10:35.160310 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-h2g26"
Oct 11 04:10:35 crc kubenswrapper[4798]: I1011 04:10:35.163642 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-2lxjs" event={"ID":"92a46d10-49a4-46d5-8344-5d3d910eb5ca","Type":"ContainerDied","Data":"0569636b9ebe84dbfeaacaec831abc303fcccff93bf9ced72118ecf78e61cbcc"}
Oct 11 04:10:35 crc kubenswrapper[4798]: I1011 04:10:35.163682 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-2lxjs"
Oct 11 04:10:35 crc kubenswrapper[4798]: I1011 04:10:35.163688 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0569636b9ebe84dbfeaacaec831abc303fcccff93bf9ced72118ecf78e61cbcc"
Oct 11 04:10:39 crc kubenswrapper[4798]: I1011 04:10:39.823561 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.149689 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-d706-account-create-sb7b5"]
Oct 11 04:10:40 crc kubenswrapper[4798]: E1011 04:10:40.150144 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef82cf1a-27d8-4ce1-82ca-5143fb76af9a" containerName="dnsmasq-dns"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.150167 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef82cf1a-27d8-4ce1-82ca-5143fb76af9a" containerName="dnsmasq-dns"
Oct 11 04:10:40 crc kubenswrapper[4798]: E1011 04:10:40.150177 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92a46d10-49a4-46d5-8344-5d3d910eb5ca" containerName="mariadb-database-create"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.150183 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="92a46d10-49a4-46d5-8344-5d3d910eb5ca" containerName="mariadb-database-create"
Oct 11 04:10:40 crc kubenswrapper[4798]: E1011 04:10:40.150203 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef82cf1a-27d8-4ce1-82ca-5143fb76af9a" containerName="init"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.150212 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef82cf1a-27d8-4ce1-82ca-5143fb76af9a" containerName="init"
Oct 11 04:10:40 crc kubenswrapper[4798]: E1011 04:10:40.150240 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="facccfd6-9383-4336-be2d-1f56b6f11d25" containerName="mariadb-database-create"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.150246 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="facccfd6-9383-4336-be2d-1f56b6f11d25" containerName="mariadb-database-create"
Oct 11 04:10:40 crc kubenswrapper[4798]: E1011 04:10:40.150255 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4264e4c3-e92c-4335-948f-565d22a7ba21" containerName="mariadb-database-create"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.150261 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="4264e4c3-e92c-4335-948f-565d22a7ba21" containerName="mariadb-database-create"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.150494 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="facccfd6-9383-4336-be2d-1f56b6f11d25" containerName="mariadb-database-create"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.150518 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="4264e4c3-e92c-4335-948f-565d22a7ba21" containerName="mariadb-database-create"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.150528 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="92a46d10-49a4-46d5-8344-5d3d910eb5ca" containerName="mariadb-database-create"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.150542 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef82cf1a-27d8-4ce1-82ca-5143fb76af9a" containerName="dnsmasq-dns"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.151310 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-d706-account-create-sb7b5"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.159160 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-d706-account-create-sb7b5"]
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.159452 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.255250 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txnjx\" (UniqueName: \"kubernetes.io/projected/23d21fa0-4e91-45e1-b694-8fc3d526b0e8-kube-api-access-txnjx\") pod \"placement-d706-account-create-sb7b5\" (UID: \"23d21fa0-4e91-45e1-b694-8fc3d526b0e8\") " pod="openstack/placement-d706-account-create-sb7b5"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.356618 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txnjx\" (UniqueName: \"kubernetes.io/projected/23d21fa0-4e91-45e1-b694-8fc3d526b0e8-kube-api-access-txnjx\") pod \"placement-d706-account-create-sb7b5\" (UID: \"23d21fa0-4e91-45e1-b694-8fc3d526b0e8\") " pod="openstack/placement-d706-account-create-sb7b5"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.373936 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txnjx\" (UniqueName: \"kubernetes.io/projected/23d21fa0-4e91-45e1-b694-8fc3d526b0e8-kube-api-access-txnjx\") pod \"placement-d706-account-create-sb7b5\" (UID: \"23d21fa0-4e91-45e1-b694-8fc3d526b0e8\") " pod="openstack/placement-d706-account-create-sb7b5"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.471801 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-d706-account-create-sb7b5"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.564261 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-e33a-account-create-b6g6r"]
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.573776 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-e33a-account-create-b6g6r"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.577033 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.581822 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-e33a-account-create-b6g6r"]
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.670671 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhr8d\" (UniqueName: \"kubernetes.io/projected/b194c956-398d-4e4d-9ada-7ac9a0323720-kube-api-access-mhr8d\") pod \"glance-e33a-account-create-b6g6r\" (UID: \"b194c956-398d-4e4d-9ada-7ac9a0323720\") " pod="openstack/glance-e33a-account-create-b6g6r"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.745102 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-d706-account-create-sb7b5"]
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.772901 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhr8d\" (UniqueName: \"kubernetes.io/projected/b194c956-398d-4e4d-9ada-7ac9a0323720-kube-api-access-mhr8d\") pod \"glance-e33a-account-create-b6g6r\" (UID: \"b194c956-398d-4e4d-9ada-7ac9a0323720\") " pod="openstack/glance-e33a-account-create-b6g6r"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.792054 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhr8d\" (UniqueName: \"kubernetes.io/projected/b194c956-398d-4e4d-9ada-7ac9a0323720-kube-api-access-mhr8d\") pod \"glance-e33a-account-create-b6g6r\" (UID: \"b194c956-398d-4e4d-9ada-7ac9a0323720\") " pod="openstack/glance-e33a-account-create-b6g6r"
Oct 11 04:10:40 crc kubenswrapper[4798]: I1011 04:10:40.902638 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-e33a-account-create-b6g6r"
Oct 11 04:10:41 crc kubenswrapper[4798]: I1011 04:10:41.215869 4798 generic.go:334] "Generic (PLEG): container finished" podID="23d21fa0-4e91-45e1-b694-8fc3d526b0e8" containerID="4d04877a44dcc808f21667a8f3a3ad15f2784e8d8a7764f30c95b48d3117dcf5" exitCode=0
Oct 11 04:10:41 crc kubenswrapper[4798]: I1011 04:10:41.216090 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d706-account-create-sb7b5" event={"ID":"23d21fa0-4e91-45e1-b694-8fc3d526b0e8","Type":"ContainerDied","Data":"4d04877a44dcc808f21667a8f3a3ad15f2784e8d8a7764f30c95b48d3117dcf5"}
Oct 11 04:10:41 crc kubenswrapper[4798]: I1011 04:10:41.216245 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d706-account-create-sb7b5" event={"ID":"23d21fa0-4e91-45e1-b694-8fc3d526b0e8","Type":"ContainerStarted","Data":"f3392c839861fe5070010c2fc3b0013c8da118c5e2795209207f7d5e67308584"}
Oct 11 04:10:41 crc kubenswrapper[4798]: I1011 04:10:41.318509 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-e33a-account-create-b6g6r"]
Oct 11 04:10:41 crc kubenswrapper[4798]: W1011 04:10:41.323491 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb194c956_398d_4e4d_9ada_7ac9a0323720.slice/crio-df75d53298a7beb745cef0de11e88bd31215200aeebaabe2fb6e662ca73270a8 WatchSource:0}: Error finding container df75d53298a7beb745cef0de11e88bd31215200aeebaabe2fb6e662ca73270a8: Status 404 returned error can't find the container with id df75d53298a7beb745cef0de11e88bd31215200aeebaabe2fb6e662ca73270a8
Oct 11 04:10:41 crc kubenswrapper[4798]: I1011 04:10:41.702575 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0"
Oct 11 04:10:42 crc kubenswrapper[4798]: I1011 04:10:42.225483 4798 generic.go:334] "Generic (PLEG): container finished" podID="b194c956-398d-4e4d-9ada-7ac9a0323720" containerID="0b2f0cecf81461c70c1e403529c71c36026975d11053b8ff269926c43ceb906d" exitCode=0
Oct 11 04:10:42 crc kubenswrapper[4798]: I1011 04:10:42.225530 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-e33a-account-create-b6g6r" event={"ID":"b194c956-398d-4e4d-9ada-7ac9a0323720","Type":"ContainerDied","Data":"0b2f0cecf81461c70c1e403529c71c36026975d11053b8ff269926c43ceb906d"}
Oct 11 04:10:42 crc kubenswrapper[4798]: I1011 04:10:42.225858 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-e33a-account-create-b6g6r" event={"ID":"b194c956-398d-4e4d-9ada-7ac9a0323720","Type":"ContainerStarted","Data":"df75d53298a7beb745cef0de11e88bd31215200aeebaabe2fb6e662ca73270a8"}
Oct 11 04:10:42 crc kubenswrapper[4798]: I1011 04:10:42.536003 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-d706-account-create-sb7b5"
Oct 11 04:10:42 crc kubenswrapper[4798]: I1011 04:10:42.699903 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txnjx\" (UniqueName: \"kubernetes.io/projected/23d21fa0-4e91-45e1-b694-8fc3d526b0e8-kube-api-access-txnjx\") pod \"23d21fa0-4e91-45e1-b694-8fc3d526b0e8\" (UID: \"23d21fa0-4e91-45e1-b694-8fc3d526b0e8\") "
Oct 11 04:10:42 crc kubenswrapper[4798]: I1011 04:10:42.705564 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23d21fa0-4e91-45e1-b694-8fc3d526b0e8-kube-api-access-txnjx" (OuterVolumeSpecName: "kube-api-access-txnjx") pod "23d21fa0-4e91-45e1-b694-8fc3d526b0e8" (UID: "23d21fa0-4e91-45e1-b694-8fc3d526b0e8"). InnerVolumeSpecName "kube-api-access-txnjx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:10:42 crc kubenswrapper[4798]: I1011 04:10:42.801865 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txnjx\" (UniqueName: \"kubernetes.io/projected/23d21fa0-4e91-45e1-b694-8fc3d526b0e8-kube-api-access-txnjx\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:43 crc kubenswrapper[4798]: I1011 04:10:43.242728 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-d706-account-create-sb7b5"
Oct 11 04:10:43 crc kubenswrapper[4798]: I1011 04:10:43.244502 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d706-account-create-sb7b5" event={"ID":"23d21fa0-4e91-45e1-b694-8fc3d526b0e8","Type":"ContainerDied","Data":"f3392c839861fe5070010c2fc3b0013c8da118c5e2795209207f7d5e67308584"}
Oct 11 04:10:43 crc kubenswrapper[4798]: I1011 04:10:43.244544 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3392c839861fe5070010c2fc3b0013c8da118c5e2795209207f7d5e67308584"
Oct 11 04:10:43 crc kubenswrapper[4798]: I1011 04:10:43.541306 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-e33a-account-create-b6g6r"
Oct 11 04:10:43 crc kubenswrapper[4798]: I1011 04:10:43.620486 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhr8d\" (UniqueName: \"kubernetes.io/projected/b194c956-398d-4e4d-9ada-7ac9a0323720-kube-api-access-mhr8d\") pod \"b194c956-398d-4e4d-9ada-7ac9a0323720\" (UID: \"b194c956-398d-4e4d-9ada-7ac9a0323720\") "
Oct 11 04:10:43 crc kubenswrapper[4798]: I1011 04:10:43.625249 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b194c956-398d-4e4d-9ada-7ac9a0323720-kube-api-access-mhr8d" (OuterVolumeSpecName: "kube-api-access-mhr8d") pod "b194c956-398d-4e4d-9ada-7ac9a0323720" (UID: "b194c956-398d-4e4d-9ada-7ac9a0323720"). InnerVolumeSpecName "kube-api-access-mhr8d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:10:43 crc kubenswrapper[4798]: I1011 04:10:43.722644 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhr8d\" (UniqueName: \"kubernetes.io/projected/b194c956-398d-4e4d-9ada-7ac9a0323720-kube-api-access-mhr8d\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:44 crc kubenswrapper[4798]: I1011 04:10:44.252121 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-e33a-account-create-b6g6r" event={"ID":"b194c956-398d-4e4d-9ada-7ac9a0323720","Type":"ContainerDied","Data":"df75d53298a7beb745cef0de11e88bd31215200aeebaabe2fb6e662ca73270a8"}
Oct 11 04:10:44 crc kubenswrapper[4798]: I1011 04:10:44.252561 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df75d53298a7beb745cef0de11e88bd31215200aeebaabe2fb6e662ca73270a8"
Oct 11 04:10:44 crc kubenswrapper[4798]: I1011 04:10:44.252157 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-e33a-account-create-b6g6r"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.696604 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-tlsp9"]
Oct 11 04:10:45 crc kubenswrapper[4798]: E1011 04:10:45.696936 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b194c956-398d-4e4d-9ada-7ac9a0323720" containerName="mariadb-account-create"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.696970 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="b194c956-398d-4e4d-9ada-7ac9a0323720" containerName="mariadb-account-create"
Oct 11 04:10:45 crc kubenswrapper[4798]: E1011 04:10:45.696983 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23d21fa0-4e91-45e1-b694-8fc3d526b0e8" containerName="mariadb-account-create"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.696990 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="23d21fa0-4e91-45e1-b694-8fc3d526b0e8" containerName="mariadb-account-create"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.697188 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="b194c956-398d-4e4d-9ada-7ac9a0323720" containerName="mariadb-account-create"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.697217 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="23d21fa0-4e91-45e1-b694-8fc3d526b0e8" containerName="mariadb-account-create"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.697803 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.699737 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.704477 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-k5vlw"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.715344 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-tlsp9"]
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.805549 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-sv678" podUID="3e260c01-682b-4b79-9ebe-c06c29750bfe" containerName="ovn-controller" probeResult="failure" output=<
Oct 11 04:10:45 crc kubenswrapper[4798]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Oct 11 04:10:45 crc kubenswrapper[4798]: >
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.860762 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-combined-ca-bundle\") pod \"glance-db-sync-tlsp9\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.860909 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-config-data\") pod \"glance-db-sync-tlsp9\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.860989 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cw2n\" (UniqueName: \"kubernetes.io/projected/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-kube-api-access-7cw2n\") pod \"glance-db-sync-tlsp9\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.861140 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-db-sync-config-data\") pod \"glance-db-sync-tlsp9\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.877649 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-tk4gr"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.877797 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-tk4gr"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.963636 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cw2n\" (UniqueName: \"kubernetes.io/projected/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-kube-api-access-7cw2n\") pod \"glance-db-sync-tlsp9\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.963756 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-db-sync-config-data\") pod \"glance-db-sync-tlsp9\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.963803 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-combined-ca-bundle\") pod \"glance-db-sync-tlsp9\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.964810 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-config-data\") pod \"glance-db-sync-tlsp9\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.969801 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-db-sync-config-data\") pod \"glance-db-sync-tlsp9\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.970356 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-combined-ca-bundle\") pod \"glance-db-sync-tlsp9\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.977328 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-config-data\") pod \"glance-db-sync-tlsp9\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:45 crc kubenswrapper[4798]: I1011 04:10:45.981318 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cw2n\" (UniqueName: \"kubernetes.io/projected/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-kube-api-access-7cw2n\") pod \"glance-db-sync-tlsp9\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.022621 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-tlsp9"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.098658 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-sv678-config-glwzf"]
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.099827 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.104721 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.106696 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-sv678-config-glwzf"]
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.271436 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-run\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.271560 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-scripts\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.271602 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-additional-scripts\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.271671 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztkvd\" (UniqueName: \"kubernetes.io/projected/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-kube-api-access-ztkvd\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.271714 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-run-ovn\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.271824 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-log-ovn\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.373428 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-log-ovn\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.373772 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-log-ovn\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.373857 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-run\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.373929 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-scripts\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.373993 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-run\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.374973 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-additional-scripts\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.375099 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-additional-scripts\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.375147 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztkvd\" (UniqueName: \"kubernetes.io/projected/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-kube-api-access-ztkvd\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.375287 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-run-ovn\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.375363 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-run-ovn\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.377077 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-scripts\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.395124 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztkvd\" (UniqueName: \"kubernetes.io/projected/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-kube-api-access-ztkvd\") pod \"ovn-controller-sv678-config-glwzf\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") " pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.447706 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.557670 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-tlsp9"]
Oct 11 04:10:46 crc kubenswrapper[4798]: W1011 04:10:46.574335 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod916c6d8b_b83d_4f53_8bfd_7e8c8edbb2ad.slice/crio-bb6083f20716e514353067bfa8dc49d2e7ef78315258ecb9bb2780759b7da578 WatchSource:0}: Error finding container bb6083f20716e514353067bfa8dc49d2e7ef78315258ecb9bb2780759b7da578: Status 404 returned error can't find the container with id bb6083f20716e514353067bfa8dc49d2e7ef78315258ecb9bb2780759b7da578
Oct 11 04:10:46 crc kubenswrapper[4798]: I1011 04:10:46.715210 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-sv678-config-glwzf"]
Oct 11 04:10:46 crc kubenswrapper[4798]: W1011 04:10:46.773476 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d64c54c_e1f2_4f5a_864f_c2ec9723779e.slice/crio-72f5cf146ab2758e9f41be5323dffef8b8fcaeb677536e4e57d20aaa1ad3424a WatchSource:0}: Error finding container 72f5cf146ab2758e9f41be5323dffef8b8fcaeb677536e4e57d20aaa1ad3424a: Status 404 returned error can't find the container with id 72f5cf146ab2758e9f41be5323dffef8b8fcaeb677536e4e57d20aaa1ad3424a
Oct 11 04:10:47 crc kubenswrapper[4798]: I1011 04:10:47.293759 4798 generic.go:334] "Generic (PLEG): container finished" podID="f7b18bbb-aead-4356-99b3-f1ee253f86e8" containerID="b460d449f79e7f0722844afeca28dbfdb51a6a5f706ded8dadd5bf0066b6d520" exitCode=0
Oct 11 04:10:47 crc kubenswrapper[4798]: I1011 04:10:47.293882 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f7b18bbb-aead-4356-99b3-f1ee253f86e8","Type":"ContainerDied","Data":"b460d449f79e7f0722844afeca28dbfdb51a6a5f706ded8dadd5bf0066b6d520"}
Oct 11 04:10:47 crc kubenswrapper[4798]: I1011 04:10:47.300094 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sv678-config-glwzf" event={"ID":"3d64c54c-e1f2-4f5a-864f-c2ec9723779e","Type":"ContainerStarted","Data":"9ae9a24e22a6942b2037276acde54483ffc2f9a5887c52cd49362217cdd6f6ef"}
Oct 11 04:10:47 crc kubenswrapper[4798]: I1011 04:10:47.300157 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sv678-config-glwzf" event={"ID":"3d64c54c-e1f2-4f5a-864f-c2ec9723779e","Type":"ContainerStarted","Data":"72f5cf146ab2758e9f41be5323dffef8b8fcaeb677536e4e57d20aaa1ad3424a"}
Oct 11 04:10:47 crc kubenswrapper[4798]: I1011 04:10:47.302658 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-tlsp9" event={"ID":"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad","Type":"ContainerStarted","Data":"bb6083f20716e514353067bfa8dc49d2e7ef78315258ecb9bb2780759b7da578"}
Oct 11 04:10:47 crc kubenswrapper[4798]: I1011 04:10:47.304376 4798 generic.go:334] "Generic (PLEG): container finished" podID="eff19131-d13b-495f-94a3-097e030fd3cd" containerID="030275056fe2ac998fca1a9e12a7334f798986e02b40dd3ccf746ef1ee118864" exitCode=0
Oct 11 04:10:47 crc kubenswrapper[4798]: I1011 04:10:47.304751 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"eff19131-d13b-495f-94a3-097e030fd3cd","Type":"ContainerDied","Data":"030275056fe2ac998fca1a9e12a7334f798986e02b40dd3ccf746ef1ee118864"}
Oct 11 04:10:47 crc kubenswrapper[4798]: I1011 04:10:47.370708 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-sv678-config-glwzf" podStartSLOduration=1.3706788300000001 podStartE2EDuration="1.37067883s" podCreationTimestamp="2025-10-11 04:10:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:10:47.36106786 +0000 UTC m=+942.697357566" watchObservedRunningTime="2025-10-11 04:10:47.37067883 +0000 UTC m=+942.706968516"
Oct 11 04:10:48 crc kubenswrapper[4798]: I1011 04:10:48.317444 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"eff19131-d13b-495f-94a3-097e030fd3cd","Type":"ContainerStarted","Data":"e4ce79436847e4997249775c35dd097ae6234543ccd06b3be91fee95ce038ee8"}
Oct 11 04:10:48 crc kubenswrapper[4798]: I1011 04:10:48.318050 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Oct 11 04:10:48 crc kubenswrapper[4798]: I1011 04:10:48.322556 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f7b18bbb-aead-4356-99b3-f1ee253f86e8","Type":"ContainerStarted","Data":"faf08274c494cd3fe463ce8dd317d209c1bb6c482204375c1aa72645d1c00b9b"}
Oct 11 04:10:48 crc kubenswrapper[4798]: I1011 04:10:48.322801 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Oct 11 04:10:48 crc kubenswrapper[4798]: I1011 04:10:48.324534 4798 generic.go:334] "Generic (PLEG): container finished" podID="3d64c54c-e1f2-4f5a-864f-c2ec9723779e" containerID="9ae9a24e22a6942b2037276acde54483ffc2f9a5887c52cd49362217cdd6f6ef" exitCode=0
Oct 11 04:10:48 crc kubenswrapper[4798]: I1011 04:10:48.324568 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sv678-config-glwzf" event={"ID":"3d64c54c-e1f2-4f5a-864f-c2ec9723779e","Type":"ContainerDied","Data":"9ae9a24e22a6942b2037276acde54483ffc2f9a5887c52cd49362217cdd6f6ef"}
Oct 11 04:10:48 crc kubenswrapper[4798]: I1011 04:10:48.346805 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=53.141875111 podStartE2EDuration="1m3.346787474s" podCreationTimestamp="2025-10-11 04:09:45 +0000 UTC" firstStartedPulling="2025-10-11 04:10:02.044987587 +0000 UTC m=+897.381277273" lastFinishedPulling="2025-10-11 04:10:12.24989995 +0000 UTC m=+907.586189636" observedRunningTime="2025-10-11 04:10:48.340209466 +0000 UTC m=+943.676499162" watchObservedRunningTime="2025-10-11 04:10:48.346787474 +0000 UTC m=+943.683077160"
Oct 11 04:10:48 crc kubenswrapper[4798]: I1011 04:10:48.381081 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=52.808951712 podStartE2EDuration="1m3.381058412s" podCreationTimestamp="2025-10-11 04:09:45 +0000 UTC" firstStartedPulling="2025-10-11 04:10:02.485873726 +0000 UTC m=+897.822163412" lastFinishedPulling="2025-10-11 04:10:13.057980426 +0000 UTC m=+908.394270112" observedRunningTime="2025-10-11 04:10:48.378164303 +0000 UTC m=+943.714453989" watchObservedRunningTime="2025-10-11 04:10:48.381058412 +0000 UTC m=+943.717348098"
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.693305 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-sv678-config-glwzf"
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.737822 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztkvd\" (UniqueName: \"kubernetes.io/projected/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-kube-api-access-ztkvd\") pod \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") "
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.737875 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-run-ovn\") pod \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") "
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.737919 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-run\") pod \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") "
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.737957 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-additional-scripts\") pod \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") "
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.737989 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-scripts\") pod \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") "
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.738046 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-log-ovn\") pod \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\" (UID: \"3d64c54c-e1f2-4f5a-864f-c2ec9723779e\") "
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.738744 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "3d64c54c-e1f2-4f5a-864f-c2ec9723779e" (UID: "3d64c54c-e1f2-4f5a-864f-c2ec9723779e"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.739322 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-run" (OuterVolumeSpecName: "var-run") pod "3d64c54c-e1f2-4f5a-864f-c2ec9723779e" (UID: "3d64c54c-e1f2-4f5a-864f-c2ec9723779e"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.739372 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "3d64c54c-e1f2-4f5a-864f-c2ec9723779e" (UID: "3d64c54c-e1f2-4f5a-864f-c2ec9723779e"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.740034 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "3d64c54c-e1f2-4f5a-864f-c2ec9723779e" (UID: "3d64c54c-e1f2-4f5a-864f-c2ec9723779e"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.740499 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-scripts" (OuterVolumeSpecName: "scripts") pod "3d64c54c-e1f2-4f5a-864f-c2ec9723779e" (UID: "3d64c54c-e1f2-4f5a-864f-c2ec9723779e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.746406 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-kube-api-access-ztkvd" (OuterVolumeSpecName: "kube-api-access-ztkvd") pod "3d64c54c-e1f2-4f5a-864f-c2ec9723779e" (UID: "3d64c54c-e1f2-4f5a-864f-c2ec9723779e"). InnerVolumeSpecName "kube-api-access-ztkvd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.839942 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztkvd\" (UniqueName: \"kubernetes.io/projected/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-kube-api-access-ztkvd\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.839987 4798 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-run-ovn\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.840000 4798 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-run\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.840011 4798 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-additional-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.840025 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.840037 4798 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3d64c54c-e1f2-4f5a-864f-c2ec9723779e-var-log-ovn\") on node \"crc\" DevicePath \"\""
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.967501 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-3b28-account-create-7w992"]
Oct 11 04:10:49 crc kubenswrapper[4798]: E1011 04:10:49.967892 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d64c54c-e1f2-4f5a-864f-c2ec9723779e" containerName="ovn-config"
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.967915 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d64c54c-e1f2-4f5a-864f-c2ec9723779e" containerName="ovn-config"
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.968101 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d64c54c-e1f2-4f5a-864f-c2ec9723779e" containerName="ovn-config"
Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.968789 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-3b28-account-create-7w992" Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.971167 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Oct 11 04:10:49 crc kubenswrapper[4798]: I1011 04:10:49.993426 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-3b28-account-create-7w992"] Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.044464 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4fq5\" (UniqueName: \"kubernetes.io/projected/1c998cfd-5665-44b4-865c-122eaa9effac-kube-api-access-n4fq5\") pod \"keystone-3b28-account-create-7w992\" (UID: \"1c998cfd-5665-44b4-865c-122eaa9effac\") " pod="openstack/keystone-3b28-account-create-7w992" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.146614 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4fq5\" (UniqueName: \"kubernetes.io/projected/1c998cfd-5665-44b4-865c-122eaa9effac-kube-api-access-n4fq5\") pod \"keystone-3b28-account-create-7w992\" (UID: \"1c998cfd-5665-44b4-865c-122eaa9effac\") " pod="openstack/keystone-3b28-account-create-7w992" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.171282 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4fq5\" (UniqueName: \"kubernetes.io/projected/1c998cfd-5665-44b4-865c-122eaa9effac-kube-api-access-n4fq5\") pod \"keystone-3b28-account-create-7w992\" (UID: \"1c998cfd-5665-44b4-865c-122eaa9effac\") " pod="openstack/keystone-3b28-account-create-7w992" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.284506 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-3b28-account-create-7w992" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.351306 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sv678-config-glwzf" event={"ID":"3d64c54c-e1f2-4f5a-864f-c2ec9723779e","Type":"ContainerDied","Data":"72f5cf146ab2758e9f41be5323dffef8b8fcaeb677536e4e57d20aaa1ad3424a"} Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.351351 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72f5cf146ab2758e9f41be5323dffef8b8fcaeb677536e4e57d20aaa1ad3424a" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.351410 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-sv678-config-glwzf" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.718971 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-3b28-account-create-7w992"] Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.844636 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-sv678-config-glwzf"] Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.854172 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-sv678-config-glwzf"] Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.859815 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-sv678" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.930838 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-sv678-config-l8plh"] Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.931844 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.943534 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.952070 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-sv678-config-l8plh"] Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.966867 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-run\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.967240 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/657bf716-9609-4533-a42f-f0f58f40884d-scripts\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.967260 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-log-ovn\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.967302 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-run-ovn\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.967321 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xgq6\" (UniqueName: \"kubernetes.io/projected/657bf716-9609-4533-a42f-f0f58f40884d-kube-api-access-8xgq6\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:50 crc kubenswrapper[4798]: I1011 04:10:50.967339 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/657bf716-9609-4533-a42f-f0f58f40884d-additional-scripts\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.069243 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xgq6\" (UniqueName: \"kubernetes.io/projected/657bf716-9609-4533-a42f-f0f58f40884d-kube-api-access-8xgq6\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.069612 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: 
\"kubernetes.io/configmap/657bf716-9609-4533-a42f-f0f58f40884d-additional-scripts\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.070423 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/657bf716-9609-4533-a42f-f0f58f40884d-additional-scripts\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.070784 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-run\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.070817 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-run\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.070897 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/657bf716-9609-4533-a42f-f0f58f40884d-scripts\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.070925 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-log-ovn\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.071277 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-log-ovn\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.072853 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/657bf716-9609-4533-a42f-f0f58f40884d-scripts\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.073163 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-run-ovn\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.073254 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-run-ovn\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.087912 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xgq6\" (UniqueName: \"kubernetes.io/projected/657bf716-9609-4533-a42f-f0f58f40884d-kube-api-access-8xgq6\") pod \"ovn-controller-sv678-config-l8plh\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.257513 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.361527 4798 generic.go:334] "Generic (PLEG): container finished" podID="1c998cfd-5665-44b4-865c-122eaa9effac" containerID="1112ef84fa4ce584316d1bf971ebaffd58b29e3d065518fe162acd42e5805d47" exitCode=0 Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.361591 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-3b28-account-create-7w992" event={"ID":"1c998cfd-5665-44b4-865c-122eaa9effac","Type":"ContainerDied","Data":"1112ef84fa4ce584316d1bf971ebaffd58b29e3d065518fe162acd42e5805d47"} Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.361626 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-3b28-account-create-7w992" event={"ID":"1c998cfd-5665-44b4-865c-122eaa9effac","Type":"ContainerStarted","Data":"c5cf75f7115a5bf57f4d76b6c370e96e453dbd32f1aa85d280f431b56036901b"} Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.436793 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d64c54c-e1f2-4f5a-864f-c2ec9723779e" path="/var/lib/kubelet/pods/3d64c54c-e1f2-4f5a-864f-c2ec9723779e/volumes" Oct 11 04:10:51 crc kubenswrapper[4798]: I1011 04:10:51.739163 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-sv678-config-l8plh"] Oct 11 04:10:51 crc kubenswrapper[4798]: W1011 04:10:51.752724 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod657bf716_9609_4533_a42f_f0f58f40884d.slice/crio-ce839fc0296dbfe0b57f069ddccded4e12686ff34bd0b5fbeb29d381ec3e8d50 WatchSource:0}: Error finding container ce839fc0296dbfe0b57f069ddccded4e12686ff34bd0b5fbeb29d381ec3e8d50: Status 404 returned error can't find the container with id ce839fc0296dbfe0b57f069ddccded4e12686ff34bd0b5fbeb29d381ec3e8d50 Oct 11 04:10:52 crc kubenswrapper[4798]: I1011 04:10:52.372481 4798 generic.go:334] "Generic (PLEG): container finished" podID="657bf716-9609-4533-a42f-f0f58f40884d" containerID="eddd09c98c7ff46535ab323792d16179eeb6c27c1458ecb69baf1e866d44660e" exitCode=0 Oct 11 04:10:52 crc kubenswrapper[4798]: I1011 04:10:52.372685 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sv678-config-l8plh" event={"ID":"657bf716-9609-4533-a42f-f0f58f40884d","Type":"ContainerDied","Data":"eddd09c98c7ff46535ab323792d16179eeb6c27c1458ecb69baf1e866d44660e"} Oct 11 04:10:52 crc kubenswrapper[4798]: I1011 04:10:52.372941 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sv678-config-l8plh" 
event={"ID":"657bf716-9609-4533-a42f-f0f58f40884d","Type":"ContainerStarted","Data":"ce839fc0296dbfe0b57f069ddccded4e12686ff34bd0b5fbeb29d381ec3e8d50"} Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.225218 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-3b28-account-create-7w992" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.238223 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.341415 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4fq5\" (UniqueName: \"kubernetes.io/projected/1c998cfd-5665-44b4-865c-122eaa9effac-kube-api-access-n4fq5\") pod \"1c998cfd-5665-44b4-865c-122eaa9effac\" (UID: \"1c998cfd-5665-44b4-865c-122eaa9effac\") " Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.341609 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/657bf716-9609-4533-a42f-f0f58f40884d-additional-scripts\") pod \"657bf716-9609-4533-a42f-f0f58f40884d\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.341721 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-run\") pod \"657bf716-9609-4533-a42f-f0f58f40884d\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.341752 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xgq6\" (UniqueName: \"kubernetes.io/projected/657bf716-9609-4533-a42f-f0f58f40884d-kube-api-access-8xgq6\") pod \"657bf716-9609-4533-a42f-f0f58f40884d\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.341823 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-run" (OuterVolumeSpecName: "var-run") pod "657bf716-9609-4533-a42f-f0f58f40884d" (UID: "657bf716-9609-4533-a42f-f0f58f40884d"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.341848 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/657bf716-9609-4533-a42f-f0f58f40884d-scripts\") pod \"657bf716-9609-4533-a42f-f0f58f40884d\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.341878 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-log-ovn\") pod \"657bf716-9609-4533-a42f-f0f58f40884d\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.341922 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-run-ovn\") pod \"657bf716-9609-4533-a42f-f0f58f40884d\" (UID: \"657bf716-9609-4533-a42f-f0f58f40884d\") " Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.342177 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "657bf716-9609-4533-a42f-f0f58f40884d" (UID: "657bf716-9609-4533-a42f-f0f58f40884d"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.342257 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "657bf716-9609-4533-a42f-f0f58f40884d" (UID: "657bf716-9609-4533-a42f-f0f58f40884d"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.342465 4798 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-log-ovn\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.342485 4798 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.342494 4798 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/657bf716-9609-4533-a42f-f0f58f40884d-var-run\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.343323 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/657bf716-9609-4533-a42f-f0f58f40884d-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "657bf716-9609-4533-a42f-f0f58f40884d" (UID: "657bf716-9609-4533-a42f-f0f58f40884d"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.343810 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/657bf716-9609-4533-a42f-f0f58f40884d-scripts" (OuterVolumeSpecName: "scripts") pod "657bf716-9609-4533-a42f-f0f58f40884d" (UID: "657bf716-9609-4533-a42f-f0f58f40884d"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.345967 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/657bf716-9609-4533-a42f-f0f58f40884d-kube-api-access-8xgq6" (OuterVolumeSpecName: "kube-api-access-8xgq6") pod "657bf716-9609-4533-a42f-f0f58f40884d" (UID: "657bf716-9609-4533-a42f-f0f58f40884d"). InnerVolumeSpecName "kube-api-access-8xgq6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.347341 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c998cfd-5665-44b4-865c-122eaa9effac-kube-api-access-n4fq5" (OuterVolumeSpecName: "kube-api-access-n4fq5") pod "1c998cfd-5665-44b4-865c-122eaa9effac" (UID: "1c998cfd-5665-44b4-865c-122eaa9effac"). InnerVolumeSpecName "kube-api-access-n4fq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.442271 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-3b28-account-create-7w992" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.442852 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-3b28-account-create-7w992" event={"ID":"1c998cfd-5665-44b4-865c-122eaa9effac","Type":"ContainerDied","Data":"c5cf75f7115a5bf57f4d76b6c370e96e453dbd32f1aa85d280f431b56036901b"} Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.442992 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c5cf75f7115a5bf57f4d76b6c370e96e453dbd32f1aa85d280f431b56036901b" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.444215 4798 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/657bf716-9609-4533-a42f-f0f58f40884d-additional-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.444257 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xgq6\" (UniqueName: \"kubernetes.io/projected/657bf716-9609-4533-a42f-f0f58f40884d-kube-api-access-8xgq6\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.444278 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/657bf716-9609-4533-a42f-f0f58f40884d-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.444314 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4fq5\" (UniqueName: \"kubernetes.io/projected/1c998cfd-5665-44b4-865c-122eaa9effac-kube-api-access-n4fq5\") on node \"crc\" DevicePath \"\"" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.444667 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sv678-config-l8plh" event={"ID":"657bf716-9609-4533-a42f-f0f58f40884d","Type":"ContainerDied","Data":"ce839fc0296dbfe0b57f069ddccded4e12686ff34bd0b5fbeb29d381ec3e8d50"} Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.444709 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce839fc0296dbfe0b57f069ddccded4e12686ff34bd0b5fbeb29d381ec3e8d50" Oct 11 04:10:59 crc kubenswrapper[4798]: I1011 04:10:59.444789 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-sv678-config-l8plh" Oct 11 04:11:00 crc kubenswrapper[4798]: I1011 04:11:00.320661 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-sv678-config-l8plh"] Oct 11 04:11:00 crc kubenswrapper[4798]: I1011 04:11:00.329115 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-sv678-config-l8plh"] Oct 11 04:11:00 crc kubenswrapper[4798]: I1011 04:11:00.451974 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-tlsp9" event={"ID":"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad","Type":"ContainerStarted","Data":"a87b49e0a5ccb29297c80c6c43b764b62064414063987839fb791f90b874c281"} Oct 11 04:11:00 crc kubenswrapper[4798]: I1011 04:11:00.471668 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-tlsp9" podStartSLOduration=2.9587796490000002 podStartE2EDuration="15.471647082s" podCreationTimestamp="2025-10-11 04:10:45 +0000 UTC" firstStartedPulling="2025-10-11 04:10:46.577486779 +0000 UTC m=+941.913776475" lastFinishedPulling="2025-10-11 04:10:59.090354222 +0000 UTC m=+954.426643908" observedRunningTime="2025-10-11 04:11:00.468811694 +0000 UTC m=+955.805101380" watchObservedRunningTime="2025-10-11 04:11:00.471647082 +0000 UTC m=+955.807936768" Oct 11 04:11:01 crc kubenswrapper[4798]: I1011 04:11:01.433746 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="657bf716-9609-4533-a42f-f0f58f40884d" path="/var/lib/kubelet/pods/657bf716-9609-4533-a42f-f0f58f40884d/volumes" Oct 11 04:11:06 crc kubenswrapper[4798]: I1011 04:11:06.591683 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Oct 11 04:11:06 crc kubenswrapper[4798]: I1011 04:11:06.952706 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.032852 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-rpgmd"] Oct 11 04:11:07 crc kubenswrapper[4798]: E1011 04:11:07.033421 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="657bf716-9609-4533-a42f-f0f58f40884d" containerName="ovn-config" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.033515 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="657bf716-9609-4533-a42f-f0f58f40884d" containerName="ovn-config" Oct 11 04:11:07 crc kubenswrapper[4798]: E1011 04:11:07.033581 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c998cfd-5665-44b4-865c-122eaa9effac" containerName="mariadb-account-create" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.033634 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c998cfd-5665-44b4-865c-122eaa9effac" containerName="mariadb-account-create" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.033866 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="657bf716-9609-4533-a42f-f0f58f40884d" containerName="ovn-config" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.034000 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c998cfd-5665-44b4-865c-122eaa9effac" containerName="mariadb-account-create" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.035437 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-rpgmd" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.050963 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-rpgmd"] Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.087132 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgrsc\" (UniqueName: \"kubernetes.io/projected/5cba417d-7df3-4129-80f7-29514fd805b2-kube-api-access-bgrsc\") pod \"cinder-db-create-rpgmd\" (UID: \"5cba417d-7df3-4129-80f7-29514fd805b2\") " pod="openstack/cinder-db-create-rpgmd" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.122349 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-s7bg6"] Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.123829 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-s7bg6" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.138429 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-s7bg6"] Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.188877 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgrsc\" (UniqueName: \"kubernetes.io/projected/5cba417d-7df3-4129-80f7-29514fd805b2-kube-api-access-bgrsc\") pod \"cinder-db-create-rpgmd\" (UID: \"5cba417d-7df3-4129-80f7-29514fd805b2\") " pod="openstack/cinder-db-create-rpgmd" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.189015 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxdtp\" (UniqueName: \"kubernetes.io/projected/00f6ca11-f294-4a81-8acd-d5fe3c5547fd-kube-api-access-hxdtp\") pod \"barbican-db-create-s7bg6\" (UID: \"00f6ca11-f294-4a81-8acd-d5fe3c5547fd\") " pod="openstack/barbican-db-create-s7bg6" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.217853 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgrsc\" (UniqueName: \"kubernetes.io/projected/5cba417d-7df3-4129-80f7-29514fd805b2-kube-api-access-bgrsc\") pod \"cinder-db-create-rpgmd\" (UID: \"5cba417d-7df3-4129-80f7-29514fd805b2\") " pod="openstack/cinder-db-create-rpgmd" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.290837 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxdtp\" (UniqueName: \"kubernetes.io/projected/00f6ca11-f294-4a81-8acd-d5fe3c5547fd-kube-api-access-hxdtp\") pod \"barbican-db-create-s7bg6\" (UID: \"00f6ca11-f294-4a81-8acd-d5fe3c5547fd\") " pod="openstack/barbican-db-create-s7bg6" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.292065 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-fqtt8"] Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.293541 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-fqtt8" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.296202 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.302891 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-768mh" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.303125 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.303212 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.307301 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-fqtt8"] Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.331322 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxdtp\" (UniqueName: \"kubernetes.io/projected/00f6ca11-f294-4a81-8acd-d5fe3c5547fd-kube-api-access-hxdtp\") pod \"barbican-db-create-s7bg6\" (UID: \"00f6ca11-f294-4a81-8acd-d5fe3c5547fd\") " pod="openstack/barbican-db-create-s7bg6" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.363691 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-rpgmd" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.392623 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c56vs\" (UniqueName: \"kubernetes.io/projected/6554e121-a137-45e1-83c2-f24b987099ea-kube-api-access-c56vs\") pod \"keystone-db-sync-fqtt8\" (UID: \"6554e121-a137-45e1-83c2-f24b987099ea\") " pod="openstack/keystone-db-sync-fqtt8" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.392694 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6554e121-a137-45e1-83c2-f24b987099ea-combined-ca-bundle\") pod \"keystone-db-sync-fqtt8\" (UID: \"6554e121-a137-45e1-83c2-f24b987099ea\") " pod="openstack/keystone-db-sync-fqtt8" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.392717 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6554e121-a137-45e1-83c2-f24b987099ea-config-data\") pod \"keystone-db-sync-fqtt8\" (UID: \"6554e121-a137-45e1-83c2-f24b987099ea\") " pod="openstack/keystone-db-sync-fqtt8" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.443542 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-rbwr7"] Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.450606 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-rbwr7" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.445884 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-s7bg6" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.483232 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-rbwr7"] Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.498674 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gw2v\" (UniqueName: \"kubernetes.io/projected/15bea8df-46e9-43fc-9aee-1a303682485c-kube-api-access-8gw2v\") pod \"neutron-db-create-rbwr7\" (UID: \"15bea8df-46e9-43fc-9aee-1a303682485c\") " pod="openstack/neutron-db-create-rbwr7" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.499115 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c56vs\" (UniqueName: \"kubernetes.io/projected/6554e121-a137-45e1-83c2-f24b987099ea-kube-api-access-c56vs\") pod \"keystone-db-sync-fqtt8\" (UID: \"6554e121-a137-45e1-83c2-f24b987099ea\") " pod="openstack/keystone-db-sync-fqtt8" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.499295 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6554e121-a137-45e1-83c2-f24b987099ea-combined-ca-bundle\") pod \"keystone-db-sync-fqtt8\" (UID: \"6554e121-a137-45e1-83c2-f24b987099ea\") " pod="openstack/keystone-db-sync-fqtt8" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.499435 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6554e121-a137-45e1-83c2-f24b987099ea-config-data\") pod \"keystone-db-sync-fqtt8\" (UID: \"6554e121-a137-45e1-83c2-f24b987099ea\") " pod="openstack/keystone-db-sync-fqtt8" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.508535 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6554e121-a137-45e1-83c2-f24b987099ea-combined-ca-bundle\") pod \"keystone-db-sync-fqtt8\" (UID: \"6554e121-a137-45e1-83c2-f24b987099ea\") " pod="openstack/keystone-db-sync-fqtt8" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.511324 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6554e121-a137-45e1-83c2-f24b987099ea-config-data\") pod \"keystone-db-sync-fqtt8\" (UID: \"6554e121-a137-45e1-83c2-f24b987099ea\") " pod="openstack/keystone-db-sync-fqtt8" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.519590 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c56vs\" (UniqueName: \"kubernetes.io/projected/6554e121-a137-45e1-83c2-f24b987099ea-kube-api-access-c56vs\") pod \"keystone-db-sync-fqtt8\" (UID: \"6554e121-a137-45e1-83c2-f24b987099ea\") " pod="openstack/keystone-db-sync-fqtt8" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.601970 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gw2v\" (UniqueName: \"kubernetes.io/projected/15bea8df-46e9-43fc-9aee-1a303682485c-kube-api-access-8gw2v\") pod \"neutron-db-create-rbwr7\" (UID: \"15bea8df-46e9-43fc-9aee-1a303682485c\") " pod="openstack/neutron-db-create-rbwr7" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.617672 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-fqtt8" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.621681 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gw2v\" (UniqueName: \"kubernetes.io/projected/15bea8df-46e9-43fc-9aee-1a303682485c-kube-api-access-8gw2v\") pod \"neutron-db-create-rbwr7\" (UID: \"15bea8df-46e9-43fc-9aee-1a303682485c\") " pod="openstack/neutron-db-create-rbwr7" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.848070 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-rbwr7" Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.938474 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-fqtt8"] Oct 11 04:11:07 crc kubenswrapper[4798]: I1011 04:11:07.959309 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-rpgmd"] Oct 11 04:11:08 crc kubenswrapper[4798]: W1011 04:11:08.004239 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6554e121_a137_45e1_83c2_f24b987099ea.slice/crio-27f6623c98e0456a5e2ee37e7e68f75d5ebccd611bc3477665f6633844e76e73 WatchSource:0}: Error finding container 27f6623c98e0456a5e2ee37e7e68f75d5ebccd611bc3477665f6633844e76e73: Status 404 returned error can't find the container with id 27f6623c98e0456a5e2ee37e7e68f75d5ebccd611bc3477665f6633844e76e73 Oct 11 04:11:08 crc kubenswrapper[4798]: I1011 04:11:08.046288 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-s7bg6"] Oct 11 04:11:08 crc kubenswrapper[4798]: I1011 04:11:08.151650 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-rbwr7"] Oct 11 04:11:08 crc kubenswrapper[4798]: W1011 04:11:08.164850 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod15bea8df_46e9_43fc_9aee_1a303682485c.slice/crio-ef2688bd9c85c3e5faa41569756bacf98431a32f772ccb0cb3c4354df687cf91 WatchSource:0}: Error finding container ef2688bd9c85c3e5faa41569756bacf98431a32f772ccb0cb3c4354df687cf91: Status 404 returned error can't find the container with id ef2688bd9c85c3e5faa41569756bacf98431a32f772ccb0cb3c4354df687cf91 Oct 11 04:11:08 crc kubenswrapper[4798]: I1011 04:11:08.531407 4798 generic.go:334] "Generic (PLEG): container finished" podID="5cba417d-7df3-4129-80f7-29514fd805b2" containerID="335f06c3c29669acc45bb6c719107e7af3f4a3b2766268cec4a2cc3963526b43" exitCode=0 Oct 11 04:11:08 crc kubenswrapper[4798]: I1011 04:11:08.531531 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-rpgmd" event={"ID":"5cba417d-7df3-4129-80f7-29514fd805b2","Type":"ContainerDied","Data":"335f06c3c29669acc45bb6c719107e7af3f4a3b2766268cec4a2cc3963526b43"} Oct 11 04:11:08 crc kubenswrapper[4798]: I1011 04:11:08.531612 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-rpgmd" event={"ID":"5cba417d-7df3-4129-80f7-29514fd805b2","Type":"ContainerStarted","Data":"8fdbc96d55872cfccebcb572d8ca40e7bded3b41e50f594cdeb19bae052d33f8"} Oct 11 04:11:08 crc kubenswrapper[4798]: I1011 04:11:08.533899 4798 generic.go:334] "Generic (PLEG): container finished" podID="916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad" containerID="a87b49e0a5ccb29297c80c6c43b764b62064414063987839fb791f90b874c281" exitCode=0 Oct 11 04:11:08 crc kubenswrapper[4798]: I1011 04:11:08.533992 4798 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-tlsp9" event={"ID":"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad","Type":"ContainerDied","Data":"a87b49e0a5ccb29297c80c6c43b764b62064414063987839fb791f90b874c281"} Oct 11 04:11:08 crc kubenswrapper[4798]: I1011 04:11:08.535615 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-rbwr7" event={"ID":"15bea8df-46e9-43fc-9aee-1a303682485c","Type":"ContainerStarted","Data":"ef2688bd9c85c3e5faa41569756bacf98431a32f772ccb0cb3c4354df687cf91"} Oct 11 04:11:08 crc kubenswrapper[4798]: I1011 04:11:08.536826 4798 generic.go:334] "Generic (PLEG): container finished" podID="00f6ca11-f294-4a81-8acd-d5fe3c5547fd" containerID="66c42d5e14e026c76f83a4fd403d4d01e4ff70049715c794e92df73a16356279" exitCode=0 Oct 11 04:11:08 crc kubenswrapper[4798]: I1011 04:11:08.536869 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-s7bg6" event={"ID":"00f6ca11-f294-4a81-8acd-d5fe3c5547fd","Type":"ContainerDied","Data":"66c42d5e14e026c76f83a4fd403d4d01e4ff70049715c794e92df73a16356279"} Oct 11 04:11:08 crc kubenswrapper[4798]: I1011 04:11:08.536912 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-s7bg6" event={"ID":"00f6ca11-f294-4a81-8acd-d5fe3c5547fd","Type":"ContainerStarted","Data":"283ae0b9bd8f2afc717e1aa11b033bd7d303228c151b13cfcb687a25d5368477"} Oct 11 04:11:08 crc kubenswrapper[4798]: I1011 04:11:08.537921 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-fqtt8" event={"ID":"6554e121-a137-45e1-83c2-f24b987099ea","Type":"ContainerStarted","Data":"27f6623c98e0456a5e2ee37e7e68f75d5ebccd611bc3477665f6633844e76e73"} Oct 11 04:11:09 crc kubenswrapper[4798]: I1011 04:11:09.546815 4798 generic.go:334] "Generic (PLEG): container finished" podID="15bea8df-46e9-43fc-9aee-1a303682485c" containerID="4d1f27351d234df3025876454d6a4374ca0041c9421ca38c98902eddc7053ac9" exitCode=0 Oct 11 04:11:09 crc kubenswrapper[4798]: I1011 04:11:09.548008 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-rbwr7" event={"ID":"15bea8df-46e9-43fc-9aee-1a303682485c","Type":"ContainerDied","Data":"4d1f27351d234df3025876454d6a4374ca0041c9421ca38c98902eddc7053ac9"} Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.068704 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-rpgmd" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.082565 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-s7bg6" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.094179 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-tlsp9" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.150226 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-db-sync-config-data\") pod \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.150311 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-combined-ca-bundle\") pod \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.150385 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-config-data\") pod \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.150436 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgrsc\" (UniqueName: \"kubernetes.io/projected/5cba417d-7df3-4129-80f7-29514fd805b2-kube-api-access-bgrsc\") pod \"5cba417d-7df3-4129-80f7-29514fd805b2\" (UID: \"5cba417d-7df3-4129-80f7-29514fd805b2\") " Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.150462 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxdtp\" (UniqueName: \"kubernetes.io/projected/00f6ca11-f294-4a81-8acd-d5fe3c5547fd-kube-api-access-hxdtp\") pod \"00f6ca11-f294-4a81-8acd-d5fe3c5547fd\" (UID: \"00f6ca11-f294-4a81-8acd-d5fe3c5547fd\") " Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.150491 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cw2n\" (UniqueName: \"kubernetes.io/projected/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-kube-api-access-7cw2n\") pod \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\" (UID: \"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad\") " Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.158624 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cba417d-7df3-4129-80f7-29514fd805b2-kube-api-access-bgrsc" (OuterVolumeSpecName: "kube-api-access-bgrsc") pod "5cba417d-7df3-4129-80f7-29514fd805b2" (UID: "5cba417d-7df3-4129-80f7-29514fd805b2"). InnerVolumeSpecName "kube-api-access-bgrsc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.158849 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad" (UID: "916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.169386 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00f6ca11-f294-4a81-8acd-d5fe3c5547fd-kube-api-access-hxdtp" (OuterVolumeSpecName: "kube-api-access-hxdtp") pod "00f6ca11-f294-4a81-8acd-d5fe3c5547fd" (UID: "00f6ca11-f294-4a81-8acd-d5fe3c5547fd"). InnerVolumeSpecName "kube-api-access-hxdtp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.169555 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-kube-api-access-7cw2n" (OuterVolumeSpecName: "kube-api-access-7cw2n") pod "916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad" (UID: "916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad"). InnerVolumeSpecName "kube-api-access-7cw2n". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.180738 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad" (UID: "916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.217209 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-config-data" (OuterVolumeSpecName: "config-data") pod "916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad" (UID: "916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.255989 4798 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.256025 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.256037 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.256048 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgrsc\" (UniqueName: \"kubernetes.io/projected/5cba417d-7df3-4129-80f7-29514fd805b2-kube-api-access-bgrsc\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.256062 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxdtp\" (UniqueName: \"kubernetes.io/projected/00f6ca11-f294-4a81-8acd-d5fe3c5547fd-kube-api-access-hxdtp\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.256073 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cw2n\" (UniqueName: \"kubernetes.io/projected/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad-kube-api-access-7cw2n\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.559511 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-s7bg6" event={"ID":"00f6ca11-f294-4a81-8acd-d5fe3c5547fd","Type":"ContainerDied","Data":"283ae0b9bd8f2afc717e1aa11b033bd7d303228c151b13cfcb687a25d5368477"} Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.559976 4798 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="283ae0b9bd8f2afc717e1aa11b033bd7d303228c151b13cfcb687a25d5368477" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.559542 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-s7bg6" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.570821 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-rpgmd" event={"ID":"5cba417d-7df3-4129-80f7-29514fd805b2","Type":"ContainerDied","Data":"8fdbc96d55872cfccebcb572d8ca40e7bded3b41e50f594cdeb19bae052d33f8"} Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.570864 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8fdbc96d55872cfccebcb572d8ca40e7bded3b41e50f594cdeb19bae052d33f8" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.570916 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-rpgmd" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.575287 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-tlsp9" event={"ID":"916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad","Type":"ContainerDied","Data":"bb6083f20716e514353067bfa8dc49d2e7ef78315258ecb9bb2780759b7da578"} Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.575360 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb6083f20716e514353067bfa8dc49d2e7ef78315258ecb9bb2780759b7da578" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.575385 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-tlsp9" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.985335 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-554567b4f7-z8l5w"] Oct 11 04:11:10 crc kubenswrapper[4798]: E1011 04:11:10.985738 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00f6ca11-f294-4a81-8acd-d5fe3c5547fd" containerName="mariadb-database-create" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.985751 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="00f6ca11-f294-4a81-8acd-d5fe3c5547fd" containerName="mariadb-database-create" Oct 11 04:11:10 crc kubenswrapper[4798]: E1011 04:11:10.985783 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad" containerName="glance-db-sync" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.985791 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad" containerName="glance-db-sync" Oct 11 04:11:10 crc kubenswrapper[4798]: E1011 04:11:10.985806 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cba417d-7df3-4129-80f7-29514fd805b2" containerName="mariadb-database-create" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.985812 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cba417d-7df3-4129-80f7-29514fd805b2" containerName="mariadb-database-create" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.985954 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="5cba417d-7df3-4129-80f7-29514fd805b2" containerName="mariadb-database-create" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.985967 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="00f6ca11-f294-4a81-8acd-d5fe3c5547fd" containerName="mariadb-database-create" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.985989 4798 
memory_manager.go:354] "RemoveStaleState removing state" podUID="916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad" containerName="glance-db-sync" Oct 11 04:11:10 crc kubenswrapper[4798]: I1011 04:11:10.988932 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.024572 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-554567b4f7-z8l5w"] Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.072000 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-ovsdbserver-nb\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.072071 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-config\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.072108 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fz662\" (UniqueName: \"kubernetes.io/projected/482591d3-dd16-4a35-9c65-45ab2ad7d057-kube-api-access-fz662\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.072125 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-ovsdbserver-sb\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.072167 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-dns-svc\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.173545 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-config\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.173971 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fz662\" (UniqueName: \"kubernetes.io/projected/482591d3-dd16-4a35-9c65-45ab2ad7d057-kube-api-access-fz662\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.173995 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-ovsdbserver-sb\") pod 
\"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.174036 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-dns-svc\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.174113 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-ovsdbserver-nb\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.175850 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-config\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.175887 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-ovsdbserver-nb\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.176046 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-dns-svc\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.176375 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-ovsdbserver-sb\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.199090 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fz662\" (UniqueName: \"kubernetes.io/projected/482591d3-dd16-4a35-9c65-45ab2ad7d057-kube-api-access-fz662\") pod \"dnsmasq-dns-554567b4f7-z8l5w\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:11 crc kubenswrapper[4798]: I1011 04:11:11.311973 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:13 crc kubenswrapper[4798]: I1011 04:11:13.219775 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-rbwr7" Oct 11 04:11:13 crc kubenswrapper[4798]: I1011 04:11:13.309132 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gw2v\" (UniqueName: \"kubernetes.io/projected/15bea8df-46e9-43fc-9aee-1a303682485c-kube-api-access-8gw2v\") pod \"15bea8df-46e9-43fc-9aee-1a303682485c\" (UID: \"15bea8df-46e9-43fc-9aee-1a303682485c\") " Oct 11 04:11:13 crc kubenswrapper[4798]: I1011 04:11:13.314666 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15bea8df-46e9-43fc-9aee-1a303682485c-kube-api-access-8gw2v" (OuterVolumeSpecName: "kube-api-access-8gw2v") pod "15bea8df-46e9-43fc-9aee-1a303682485c" (UID: "15bea8df-46e9-43fc-9aee-1a303682485c"). InnerVolumeSpecName "kube-api-access-8gw2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:13 crc kubenswrapper[4798]: I1011 04:11:13.411094 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gw2v\" (UniqueName: \"kubernetes.io/projected/15bea8df-46e9-43fc-9aee-1a303682485c-kube-api-access-8gw2v\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:13 crc kubenswrapper[4798]: I1011 04:11:13.580009 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-554567b4f7-z8l5w"] Oct 11 04:11:13 crc kubenswrapper[4798]: I1011 04:11:13.603256 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-fqtt8" event={"ID":"6554e121-a137-45e1-83c2-f24b987099ea","Type":"ContainerStarted","Data":"030525f9e7fdc8065aa8247288ea651fa2f72ac181e7e395bc50305c695da2f2"} Oct 11 04:11:13 crc kubenswrapper[4798]: I1011 04:11:13.604830 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-rbwr7" event={"ID":"15bea8df-46e9-43fc-9aee-1a303682485c","Type":"ContainerDied","Data":"ef2688bd9c85c3e5faa41569756bacf98431a32f772ccb0cb3c4354df687cf91"} Oct 11 04:11:13 crc kubenswrapper[4798]: I1011 04:11:13.604863 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef2688bd9c85c3e5faa41569756bacf98431a32f772ccb0cb3c4354df687cf91" Oct 11 04:11:13 crc kubenswrapper[4798]: I1011 04:11:13.604955 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-rbwr7" Oct 11 04:11:13 crc kubenswrapper[4798]: I1011 04:11:13.606786 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" event={"ID":"482591d3-dd16-4a35-9c65-45ab2ad7d057","Type":"ContainerStarted","Data":"9ed041f125b355f112a48b9088da67647645a48613677b26e992690f12983372"} Oct 11 04:11:13 crc kubenswrapper[4798]: I1011 04:11:13.628125 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-fqtt8" podStartSLOduration=1.421357384 podStartE2EDuration="6.628099559s" podCreationTimestamp="2025-10-11 04:11:07 +0000 UTC" firstStartedPulling="2025-10-11 04:11:08.018753181 +0000 UTC m=+963.355042867" lastFinishedPulling="2025-10-11 04:11:13.225495316 +0000 UTC m=+968.561785042" observedRunningTime="2025-10-11 04:11:13.62228532 +0000 UTC m=+968.958575006" watchObservedRunningTime="2025-10-11 04:11:13.628099559 +0000 UTC m=+968.964389265" Oct 11 04:11:14 crc kubenswrapper[4798]: I1011 04:11:14.617355 4798 generic.go:334] "Generic (PLEG): container finished" podID="482591d3-dd16-4a35-9c65-45ab2ad7d057" containerID="3942ae0a86df15daef63bd65eee6e49642fc2045e328bf85151f9543deeef441" exitCode=0 Oct 11 04:11:14 crc kubenswrapper[4798]: I1011 04:11:14.617490 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" event={"ID":"482591d3-dd16-4a35-9c65-45ab2ad7d057","Type":"ContainerDied","Data":"3942ae0a86df15daef63bd65eee6e49642fc2045e328bf85151f9543deeef441"} Oct 11 04:11:15 crc kubenswrapper[4798]: I1011 04:11:15.633022 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" event={"ID":"482591d3-dd16-4a35-9c65-45ab2ad7d057","Type":"ContainerStarted","Data":"5cf0a0b8d8350bfdda59d6790ac94f55d5cc147be0ea4e48fab62b8914c4977a"} Oct 11 04:11:15 crc kubenswrapper[4798]: I1011 04:11:15.633795 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:15 crc kubenswrapper[4798]: I1011 04:11:15.654782 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" podStartSLOduration=5.654750996 podStartE2EDuration="5.654750996s" podCreationTimestamp="2025-10-11 04:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:11:15.650997065 +0000 UTC m=+970.987286751" watchObservedRunningTime="2025-10-11 04:11:15.654750996 +0000 UTC m=+970.991040692" Oct 11 04:11:16 crc kubenswrapper[4798]: I1011 04:11:16.646317 4798 generic.go:334] "Generic (PLEG): container finished" podID="6554e121-a137-45e1-83c2-f24b987099ea" containerID="030525f9e7fdc8065aa8247288ea651fa2f72ac181e7e395bc50305c695da2f2" exitCode=0 Oct 11 04:11:16 crc kubenswrapper[4798]: I1011 04:11:16.646450 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-fqtt8" event={"ID":"6554e121-a137-45e1-83c2-f24b987099ea","Type":"ContainerDied","Data":"030525f9e7fdc8065aa8247288ea651fa2f72ac181e7e395bc50305c695da2f2"} Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.052486 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-4c2c-account-create-7gnkg"] Oct 11 04:11:17 crc kubenswrapper[4798]: E1011 04:11:17.053097 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15bea8df-46e9-43fc-9aee-1a303682485c" 
containerName="mariadb-database-create" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.053177 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="15bea8df-46e9-43fc-9aee-1a303682485c" containerName="mariadb-database-create" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.053416 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="15bea8df-46e9-43fc-9aee-1a303682485c" containerName="mariadb-database-create" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.055262 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-4c2c-account-create-7gnkg" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.059559 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.072832 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-4c2c-account-create-7gnkg"] Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.146891 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-646e-account-create-c766h"] Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.148228 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-646e-account-create-c766h" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.150700 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.159363 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-646e-account-create-c766h"] Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.182049 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlfx9\" (UniqueName: \"kubernetes.io/projected/af88f788-8ce4-4cf3-b570-4cc85140cb48-kube-api-access-tlfx9\") pod \"barbican-4c2c-account-create-7gnkg\" (UID: \"af88f788-8ce4-4cf3-b570-4cc85140cb48\") " pod="openstack/barbican-4c2c-account-create-7gnkg" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.284161 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kjkd\" (UniqueName: \"kubernetes.io/projected/08c72420-5d00-49d3-aabb-527a1e9c22fb-kube-api-access-4kjkd\") pod \"cinder-646e-account-create-c766h\" (UID: \"08c72420-5d00-49d3-aabb-527a1e9c22fb\") " pod="openstack/cinder-646e-account-create-c766h" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.284941 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlfx9\" (UniqueName: \"kubernetes.io/projected/af88f788-8ce4-4cf3-b570-4cc85140cb48-kube-api-access-tlfx9\") pod \"barbican-4c2c-account-create-7gnkg\" (UID: \"af88f788-8ce4-4cf3-b570-4cc85140cb48\") " pod="openstack/barbican-4c2c-account-create-7gnkg" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.308656 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlfx9\" (UniqueName: \"kubernetes.io/projected/af88f788-8ce4-4cf3-b570-4cc85140cb48-kube-api-access-tlfx9\") pod \"barbican-4c2c-account-create-7gnkg\" (UID: \"af88f788-8ce4-4cf3-b570-4cc85140cb48\") " pod="openstack/barbican-4c2c-account-create-7gnkg" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.373554 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-4c2c-account-create-7gnkg" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.387945 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kjkd\" (UniqueName: \"kubernetes.io/projected/08c72420-5d00-49d3-aabb-527a1e9c22fb-kube-api-access-4kjkd\") pod \"cinder-646e-account-create-c766h\" (UID: \"08c72420-5d00-49d3-aabb-527a1e9c22fb\") " pod="openstack/cinder-646e-account-create-c766h" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.412575 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kjkd\" (UniqueName: \"kubernetes.io/projected/08c72420-5d00-49d3-aabb-527a1e9c22fb-kube-api-access-4kjkd\") pod \"cinder-646e-account-create-c766h\" (UID: \"08c72420-5d00-49d3-aabb-527a1e9c22fb\") " pod="openstack/cinder-646e-account-create-c766h" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.465443 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-646e-account-create-c766h" Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.949957 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-4c2c-account-create-7gnkg"] Oct 11 04:11:17 crc kubenswrapper[4798]: W1011 04:11:17.953714 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf88f788_8ce4_4cf3_b570_4cc85140cb48.slice/crio-6be99e8bc9011e0dea6b141a1b50cdacf76268b645f82ab8bfb9baa99b36431d WatchSource:0}: Error finding container 6be99e8bc9011e0dea6b141a1b50cdacf76268b645f82ab8bfb9baa99b36431d: Status 404 returned error can't find the container with id 6be99e8bc9011e0dea6b141a1b50cdacf76268b645f82ab8bfb9baa99b36431d Oct 11 04:11:17 crc kubenswrapper[4798]: W1011 04:11:17.955143 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod08c72420_5d00_49d3_aabb_527a1e9c22fb.slice/crio-2be99da2a24fa9c09dca8768f31db19dae255db44a06821c7feeaf00ad3d00ed WatchSource:0}: Error finding container 2be99da2a24fa9c09dca8768f31db19dae255db44a06821c7feeaf00ad3d00ed: Status 404 returned error can't find the container with id 2be99da2a24fa9c09dca8768f31db19dae255db44a06821c7feeaf00ad3d00ed Oct 11 04:11:17 crc kubenswrapper[4798]: I1011 04:11:17.957692 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-646e-account-create-c766h"] Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.006363 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-fqtt8" Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.102036 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c56vs\" (UniqueName: \"kubernetes.io/projected/6554e121-a137-45e1-83c2-f24b987099ea-kube-api-access-c56vs\") pod \"6554e121-a137-45e1-83c2-f24b987099ea\" (UID: \"6554e121-a137-45e1-83c2-f24b987099ea\") " Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.102120 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6554e121-a137-45e1-83c2-f24b987099ea-combined-ca-bundle\") pod \"6554e121-a137-45e1-83c2-f24b987099ea\" (UID: \"6554e121-a137-45e1-83c2-f24b987099ea\") " Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.102148 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6554e121-a137-45e1-83c2-f24b987099ea-config-data\") pod \"6554e121-a137-45e1-83c2-f24b987099ea\" (UID: \"6554e121-a137-45e1-83c2-f24b987099ea\") " Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.108747 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6554e121-a137-45e1-83c2-f24b987099ea-kube-api-access-c56vs" (OuterVolumeSpecName: "kube-api-access-c56vs") pod "6554e121-a137-45e1-83c2-f24b987099ea" (UID: "6554e121-a137-45e1-83c2-f24b987099ea"). InnerVolumeSpecName "kube-api-access-c56vs". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.150290 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6554e121-a137-45e1-83c2-f24b987099ea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6554e121-a137-45e1-83c2-f24b987099ea" (UID: "6554e121-a137-45e1-83c2-f24b987099ea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.180310 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6554e121-a137-45e1-83c2-f24b987099ea-config-data" (OuterVolumeSpecName: "config-data") pod "6554e121-a137-45e1-83c2-f24b987099ea" (UID: "6554e121-a137-45e1-83c2-f24b987099ea"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.204677 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c56vs\" (UniqueName: \"kubernetes.io/projected/6554e121-a137-45e1-83c2-f24b987099ea-kube-api-access-c56vs\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.204734 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6554e121-a137-45e1-83c2-f24b987099ea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.204745 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6554e121-a137-45e1-83c2-f24b987099ea-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.672303 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-fqtt8" event={"ID":"6554e121-a137-45e1-83c2-f24b987099ea","Type":"ContainerDied","Data":"27f6623c98e0456a5e2ee37e7e68f75d5ebccd611bc3477665f6633844e76e73"} Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.672459 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27f6623c98e0456a5e2ee37e7e68f75d5ebccd611bc3477665f6633844e76e73" Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.672463 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-fqtt8" Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.675026 4798 generic.go:334] "Generic (PLEG): container finished" podID="08c72420-5d00-49d3-aabb-527a1e9c22fb" containerID="31f5f9c766ed1d6c74a4ec0cb1db8809ceeb6ed936246a20fc889987020aec7c" exitCode=0 Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.675120 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-646e-account-create-c766h" event={"ID":"08c72420-5d00-49d3-aabb-527a1e9c22fb","Type":"ContainerDied","Data":"31f5f9c766ed1d6c74a4ec0cb1db8809ceeb6ed936246a20fc889987020aec7c"} Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.675147 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-646e-account-create-c766h" event={"ID":"08c72420-5d00-49d3-aabb-527a1e9c22fb","Type":"ContainerStarted","Data":"2be99da2a24fa9c09dca8768f31db19dae255db44a06821c7feeaf00ad3d00ed"} Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.678775 4798 generic.go:334] "Generic (PLEG): container finished" podID="af88f788-8ce4-4cf3-b570-4cc85140cb48" containerID="fba64869f56c8567efd4fa21cf654a04b3cbd1bfe2f2ecb58411fa2ae429aec9" exitCode=0 Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.678852 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-4c2c-account-create-7gnkg" event={"ID":"af88f788-8ce4-4cf3-b570-4cc85140cb48","Type":"ContainerDied","Data":"fba64869f56c8567efd4fa21cf654a04b3cbd1bfe2f2ecb58411fa2ae429aec9"} Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.678897 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-4c2c-account-create-7gnkg" event={"ID":"af88f788-8ce4-4cf3-b570-4cc85140cb48","Type":"ContainerStarted","Data":"6be99e8bc9011e0dea6b141a1b50cdacf76268b645f82ab8bfb9baa99b36431d"} Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.953716 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-554567b4f7-z8l5w"] Oct 11 04:11:18 crc 
kubenswrapper[4798]: I1011 04:11:18.954081 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" podUID="482591d3-dd16-4a35-9c65-45ab2ad7d057" containerName="dnsmasq-dns" containerID="cri-o://5cf0a0b8d8350bfdda59d6790ac94f55d5cc147be0ea4e48fab62b8914c4977a" gracePeriod=10 Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.996121 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-qf994"] Oct 11 04:11:18 crc kubenswrapper[4798]: E1011 04:11:18.996824 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6554e121-a137-45e1-83c2-f24b987099ea" containerName="keystone-db-sync" Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.996852 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="6554e121-a137-45e1-83c2-f24b987099ea" containerName="keystone-db-sync" Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.997110 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="6554e121-a137-45e1-83c2-f24b987099ea" containerName="keystone-db-sync" Oct 11 04:11:18 crc kubenswrapper[4798]: I1011 04:11:18.998102 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.002085 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.002130 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.002422 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.002564 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-768mh" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.018797 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qf994"] Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.031993 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67795cd9-bmgk8"] Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.035328 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.044123 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67795cd9-bmgk8"] Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.128815 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-dns-svc\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.128880 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-ovsdbserver-nb\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.128930 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-ovsdbserver-sb\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.128963 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-config-data\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.128986 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp8t7\" (UniqueName: \"kubernetes.io/projected/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-kube-api-access-rp8t7\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.129028 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-config\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.129051 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-scripts\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.129074 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-credential-keys\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.129110 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-combined-ca-bundle\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.129176 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-fernet-keys\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.129200 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6slz\" (UniqueName: \"kubernetes.io/projected/84ab6ec2-5733-4753-afa0-53e9437b7fc2-kube-api-access-b6slz\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.230538 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-fernet-keys\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.230594 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6slz\" (UniqueName: \"kubernetes.io/projected/84ab6ec2-5733-4753-afa0-53e9437b7fc2-kube-api-access-b6slz\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.230635 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-dns-svc\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.230664 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-ovsdbserver-nb\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.230702 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-ovsdbserver-sb\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.230722 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-config-data\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.231240 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp8t7\" (UniqueName: 
\"kubernetes.io/projected/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-kube-api-access-rp8t7\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.232045 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-ovsdbserver-sb\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.232042 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-dns-svc\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.232135 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-config\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.232175 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-scripts\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.232194 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-credential-keys\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.232229 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-combined-ca-bundle\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.232640 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-ovsdbserver-nb\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.233286 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-config\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.237792 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-config-data\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994" 
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.243383 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-fernet-keys\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.254553 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-combined-ca-bundle\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.261293 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-scripts\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.264320 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-credential-keys\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.284205 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6slz\" (UniqueName: \"kubernetes.io/projected/84ab6ec2-5733-4753-afa0-53e9437b7fc2-kube-api-access-b6slz\") pod \"dnsmasq-dns-67795cd9-bmgk8\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " pod="openstack/dnsmasq-dns-67795cd9-bmgk8"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.287023 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp8t7\" (UniqueName: \"kubernetes.io/projected/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-kube-api-access-rp8t7\") pod \"keystone-bootstrap-qf994\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " pod="openstack/keystone-bootstrap-qf994"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.330045 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qf994"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.384719 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67795cd9-bmgk8"]
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.386181 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67795cd9-bmgk8"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.394843 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-pjqvn"]
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.396434 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.408328 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.408646 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.409543 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-v94bf"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.501332 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-pjqvn"]
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.501372 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"]
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.502822 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.503184 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"]
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.542761 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-logs\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.542952 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-scripts\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.543047 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-combined-ca-bundle\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.543245 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz2js\" (UniqueName: \"kubernetes.io/projected/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-kube-api-access-qz2js\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.543294 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-config-data\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.555665 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.558349 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.561721 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.561718 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.595157 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.651858 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-logs\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.652123 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.652168 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ptmg\" (UniqueName: \"kubernetes.io/projected/0020f7ba-0c70-4bcf-90a8-68729a98025c-kube-api-access-6ptmg\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.652227 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-config-data\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.652274 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-scripts\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.652405 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-combined-ca-bundle\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.652446 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-ovsdbserver-nb\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.652483 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz2js\" (UniqueName: \"kubernetes.io/projected/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-kube-api-access-qz2js\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.652511 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-dns-svc\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.652541 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-ovsdbserver-sb\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.652577 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-config-data\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.652609 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0020f7ba-0c70-4bcf-90a8-68729a98025c-run-httpd\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.652642 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-config\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.652689 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0020f7ba-0c70-4bcf-90a8-68729a98025c-log-httpd\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.653927 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-scripts\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.653963 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp9p8\" (UniqueName: \"kubernetes.io/projected/d40dd511-9540-43b0-b019-9d58ecb9a4fe-kube-api-access-rp9p8\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.654050 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.654802 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-logs\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.671664 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-config-data\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.671842 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-scripts\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.673877 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-combined-ca-bundle\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.676319 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz2js\" (UniqueName: \"kubernetes.io/projected/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-kube-api-access-qz2js\") pod \"placement-db-sync-pjqvn\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " pod="openstack/placement-db-sync-pjqvn"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.696352 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-554567b4f7-z8l5w"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.700342 4798 generic.go:334] "Generic (PLEG): container finished" podID="482591d3-dd16-4a35-9c65-45ab2ad7d057" containerID="5cf0a0b8d8350bfdda59d6790ac94f55d5cc147be0ea4e48fab62b8914c4977a" exitCode=0
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.700611 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" event={"ID":"482591d3-dd16-4a35-9c65-45ab2ad7d057","Type":"ContainerDied","Data":"5cf0a0b8d8350bfdda59d6790ac94f55d5cc147be0ea4e48fab62b8914c4977a"}
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.700653 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" event={"ID":"482591d3-dd16-4a35-9c65-45ab2ad7d057","Type":"ContainerDied","Data":"9ed041f125b355f112a48b9088da67647645a48613677b26e992690f12983372"}
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.700676 4798 scope.go:117] "RemoveContainer" containerID="5cf0a0b8d8350bfdda59d6790ac94f55d5cc147be0ea4e48fab62b8914c4977a"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.755671 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-ovsdbserver-nb\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.755721 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-dns-svc\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.755745 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-ovsdbserver-sb\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.755787 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0020f7ba-0c70-4bcf-90a8-68729a98025c-run-httpd\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.755810 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-config\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.755841 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0020f7ba-0c70-4bcf-90a8-68729a98025c-log-httpd\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.755859 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-scripts\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.755879 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp9p8\" (UniqueName: \"kubernetes.io/projected/d40dd511-9540-43b0-b019-9d58ecb9a4fe-kube-api-access-rp9p8\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.755910 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.755959 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.755984 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ptmg\" (UniqueName: \"kubernetes.io/projected/0020f7ba-0c70-4bcf-90a8-68729a98025c-kube-api-access-6ptmg\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.756011 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-config-data\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.757486 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-ovsdbserver-nb\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.758536 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-dns-svc\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.759494 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-ovsdbserver-sb\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.760552 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0020f7ba-0c70-4bcf-90a8-68729a98025c-run-httpd\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.761150 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-config\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.761372 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0020f7ba-0c70-4bcf-90a8-68729a98025c-log-httpd\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.779605 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-config-data\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.780033 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.788050 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-scripts\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.791525 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.792445 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp9p8\" (UniqueName: \"kubernetes.io/projected/d40dd511-9540-43b0-b019-9d58ecb9a4fe-kube-api-access-rp9p8\") pod \"dnsmasq-dns-5b6dbdb6f5-w26gm\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.795018 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ptmg\" (UniqueName: \"kubernetes.io/projected/0020f7ba-0c70-4bcf-90a8-68729a98025c-kube-api-access-6ptmg\") pod \"ceilometer-0\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") " pod="openstack/ceilometer-0"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.798174 4798 scope.go:117] "RemoveContainer" containerID="3942ae0a86df15daef63bd65eee6e49642fc2045e328bf85151f9543deeef441"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.826418 4798 scope.go:117] "RemoveContainer" containerID="5cf0a0b8d8350bfdda59d6790ac94f55d5cc147be0ea4e48fab62b8914c4977a"
Oct 11 04:11:19 crc kubenswrapper[4798]: E1011 04:11:19.827175 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cf0a0b8d8350bfdda59d6790ac94f55d5cc147be0ea4e48fab62b8914c4977a\": container with ID starting with 5cf0a0b8d8350bfdda59d6790ac94f55d5cc147be0ea4e48fab62b8914c4977a not found: ID does not exist" containerID="5cf0a0b8d8350bfdda59d6790ac94f55d5cc147be0ea4e48fab62b8914c4977a"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.827213 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cf0a0b8d8350bfdda59d6790ac94f55d5cc147be0ea4e48fab62b8914c4977a"} err="failed to get container status \"5cf0a0b8d8350bfdda59d6790ac94f55d5cc147be0ea4e48fab62b8914c4977a\": rpc error: code = NotFound desc = could not find container \"5cf0a0b8d8350bfdda59d6790ac94f55d5cc147be0ea4e48fab62b8914c4977a\": container with ID starting with 5cf0a0b8d8350bfdda59d6790ac94f55d5cc147be0ea4e48fab62b8914c4977a not found: ID does not exist"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.827242 4798 scope.go:117] "RemoveContainer" containerID="3942ae0a86df15daef63bd65eee6e49642fc2045e328bf85151f9543deeef441"
Oct 11 04:11:19 crc kubenswrapper[4798]: E1011 04:11:19.827482 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3942ae0a86df15daef63bd65eee6e49642fc2045e328bf85151f9543deeef441\": container with ID starting with 3942ae0a86df15daef63bd65eee6e49642fc2045e328bf85151f9543deeef441 not found: ID does not exist" containerID="3942ae0a86df15daef63bd65eee6e49642fc2045e328bf85151f9543deeef441"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.827508 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3942ae0a86df15daef63bd65eee6e49642fc2045e328bf85151f9543deeef441"} err="failed to get container status \"3942ae0a86df15daef63bd65eee6e49642fc2045e328bf85151f9543deeef441\": rpc error: code = NotFound desc = could not find container \"3942ae0a86df15daef63bd65eee6e49642fc2045e328bf85151f9543deeef441\": container with ID starting with 3942ae0a86df15daef63bd65eee6e49642fc2045e328bf85151f9543deeef441 not found: ID does not exist"
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.859383 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-config\") pod \"482591d3-dd16-4a35-9c65-45ab2ad7d057\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") "
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.861040 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-dns-svc\") pod \"482591d3-dd16-4a35-9c65-45ab2ad7d057\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") "
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.861141 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-ovsdbserver-sb\") pod \"482591d3-dd16-4a35-9c65-45ab2ad7d057\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") "
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.861229 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-ovsdbserver-nb\") pod \"482591d3-dd16-4a35-9c65-45ab2ad7d057\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") "
Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.861436 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fz662\" (UniqueName:
\"kubernetes.io/projected/482591d3-dd16-4a35-9c65-45ab2ad7d057-kube-api-access-fz662\") pod \"482591d3-dd16-4a35-9c65-45ab2ad7d057\" (UID: \"482591d3-dd16-4a35-9c65-45ab2ad7d057\") " Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.868240 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/482591d3-dd16-4a35-9c65-45ab2ad7d057-kube-api-access-fz662" (OuterVolumeSpecName: "kube-api-access-fz662") pod "482591d3-dd16-4a35-9c65-45ab2ad7d057" (UID: "482591d3-dd16-4a35-9c65-45ab2ad7d057"). InnerVolumeSpecName "kube-api-access-fz662". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.914503 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-config" (OuterVolumeSpecName: "config") pod "482591d3-dd16-4a35-9c65-45ab2ad7d057" (UID: "482591d3-dd16-4a35-9c65-45ab2ad7d057"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.924337 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "482591d3-dd16-4a35-9c65-45ab2ad7d057" (UID: "482591d3-dd16-4a35-9c65-45ab2ad7d057"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.942108 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "482591d3-dd16-4a35-9c65-45ab2ad7d057" (UID: "482591d3-dd16-4a35-9c65-45ab2ad7d057"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.943877 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "482591d3-dd16-4a35-9c65-45ab2ad7d057" (UID: "482591d3-dd16-4a35-9c65-45ab2ad7d057"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.957834 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-pjqvn" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.965126 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fz662\" (UniqueName: \"kubernetes.io/projected/482591d3-dd16-4a35-9c65-45ab2ad7d057-kube-api-access-fz662\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.965176 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.965190 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.965205 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.965221 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/482591d3-dd16-4a35-9c65-45ab2ad7d057-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:19 crc kubenswrapper[4798]: I1011 04:11:19.987272 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.019856 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.102354 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qf994"] Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.134301 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-646e-account-create-c766h" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.243105 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67795cd9-bmgk8"] Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.273606 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kjkd\" (UniqueName: \"kubernetes.io/projected/08c72420-5d00-49d3-aabb-527a1e9c22fb-kube-api-access-4kjkd\") pod \"08c72420-5d00-49d3-aabb-527a1e9c22fb\" (UID: \"08c72420-5d00-49d3-aabb-527a1e9c22fb\") " Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.284083 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-4c2c-account-create-7gnkg" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.285230 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08c72420-5d00-49d3-aabb-527a1e9c22fb-kube-api-access-4kjkd" (OuterVolumeSpecName: "kube-api-access-4kjkd") pod "08c72420-5d00-49d3-aabb-527a1e9c22fb" (UID: "08c72420-5d00-49d3-aabb-527a1e9c22fb"). InnerVolumeSpecName "kube-api-access-4kjkd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.376071 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tlfx9\" (UniqueName: \"kubernetes.io/projected/af88f788-8ce4-4cf3-b570-4cc85140cb48-kube-api-access-tlfx9\") pod \"af88f788-8ce4-4cf3-b570-4cc85140cb48\" (UID: \"af88f788-8ce4-4cf3-b570-4cc85140cb48\") " Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.376829 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kjkd\" (UniqueName: \"kubernetes.io/projected/08c72420-5d00-49d3-aabb-527a1e9c22fb-kube-api-access-4kjkd\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.382633 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af88f788-8ce4-4cf3-b570-4cc85140cb48-kube-api-access-tlfx9" (OuterVolumeSpecName: "kube-api-access-tlfx9") pod "af88f788-8ce4-4cf3-b570-4cc85140cb48" (UID: "af88f788-8ce4-4cf3-b570-4cc85140cb48"). InnerVolumeSpecName "kube-api-access-tlfx9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.478670 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tlfx9\" (UniqueName: \"kubernetes.io/projected/af88f788-8ce4-4cf3-b570-4cc85140cb48-kube-api-access-tlfx9\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.606423 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-pjqvn"] Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.721932 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-554567b4f7-z8l5w" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.733702 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.749552 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"] Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.757047 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67795cd9-bmgk8" event={"ID":"84ab6ec2-5733-4753-afa0-53e9437b7fc2","Type":"ContainerStarted","Data":"27f8da2919a54f7ea2b5169a4eb56cfa91d6aec6bfb2785ea9fb67e869d4fc30"} Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.757105 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67795cd9-bmgk8" event={"ID":"84ab6ec2-5733-4753-afa0-53e9437b7fc2","Type":"ContainerStarted","Data":"f96caef3e43ac88009a402a9248fce4478caaca26a7e5f03e901fc16bce45979"} Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.761313 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-646e-account-create-c766h" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.761933 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-646e-account-create-c766h" event={"ID":"08c72420-5d00-49d3-aabb-527a1e9c22fb","Type":"ContainerDied","Data":"2be99da2a24fa9c09dca8768f31db19dae255db44a06821c7feeaf00ad3d00ed"} Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.762000 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2be99da2a24fa9c09dca8768f31db19dae255db44a06821c7feeaf00ad3d00ed" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.766667 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-4c2c-account-create-7gnkg" event={"ID":"af88f788-8ce4-4cf3-b570-4cc85140cb48","Type":"ContainerDied","Data":"6be99e8bc9011e0dea6b141a1b50cdacf76268b645f82ab8bfb9baa99b36431d"} Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.766702 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6be99e8bc9011e0dea6b141a1b50cdacf76268b645f82ab8bfb9baa99b36431d" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.766786 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-4c2c-account-create-7gnkg" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.786117 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-pjqvn" event={"ID":"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd","Type":"ContainerStarted","Data":"0a564841764620effddf7b1fbab2010473c92cfca8d789aaeaa785825e16d03c"} Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.797520 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qf994" event={"ID":"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2","Type":"ContainerStarted","Data":"4a255db8dd7d1dc2df8990b631f417baf82fd680863e71a683d0e88e8a4b6e89"} Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.797631 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qf994" event={"ID":"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2","Type":"ContainerStarted","Data":"826b4c3bf0319d0aa4255d60e1bb59d02494eedd975003de41b03edbbad5b6a3"} Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.839601 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-qf994" podStartSLOduration=2.839566215 podStartE2EDuration="2.839566215s" podCreationTimestamp="2025-10-11 04:11:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:11:20.830505178 +0000 UTC m=+976.166794864" watchObservedRunningTime="2025-10-11 04:11:20.839566215 +0000 UTC m=+976.175855901" Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.970488 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-554567b4f7-z8l5w"] Oct 11 04:11:20 crc kubenswrapper[4798]: I1011 04:11:20.979095 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-554567b4f7-z8l5w"] Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.089515 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.210417 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6slz\" (UniqueName: \"kubernetes.io/projected/84ab6ec2-5733-4753-afa0-53e9437b7fc2-kube-api-access-b6slz\") pod \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.210735 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-config\") pod \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.210918 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-dns-svc\") pod \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.211018 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-ovsdbserver-nb\") pod \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.211070 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-ovsdbserver-sb\") pod \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\" (UID: \"84ab6ec2-5733-4753-afa0-53e9437b7fc2\") " Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.216800 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84ab6ec2-5733-4753-afa0-53e9437b7fc2-kube-api-access-b6slz" (OuterVolumeSpecName: "kube-api-access-b6slz") pod "84ab6ec2-5733-4753-afa0-53e9437b7fc2" (UID: "84ab6ec2-5733-4753-afa0-53e9437b7fc2"). InnerVolumeSpecName "kube-api-access-b6slz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.240598 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "84ab6ec2-5733-4753-afa0-53e9437b7fc2" (UID: "84ab6ec2-5733-4753-afa0-53e9437b7fc2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.253928 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-config" (OuterVolumeSpecName: "config") pod "84ab6ec2-5733-4753-afa0-53e9437b7fc2" (UID: "84ab6ec2-5733-4753-afa0-53e9437b7fc2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.259369 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "84ab6ec2-5733-4753-afa0-53e9437b7fc2" (UID: "84ab6ec2-5733-4753-afa0-53e9437b7fc2"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.265656 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "84ab6ec2-5733-4753-afa0-53e9437b7fc2" (UID: "84ab6ec2-5733-4753-afa0-53e9437b7fc2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.312332 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.314666 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6slz\" (UniqueName: \"kubernetes.io/projected/84ab6ec2-5733-4753-afa0-53e9437b7fc2-kube-api-access-b6slz\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.314812 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.314922 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.315021 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.315102 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84ab6ec2-5733-4753-afa0-53e9437b7fc2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.444744 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="482591d3-dd16-4a35-9c65-45ab2ad7d057" path="/var/lib/kubelet/pods/482591d3-dd16-4a35-9c65-45ab2ad7d057/volumes" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.808707 4798 generic.go:334] "Generic (PLEG): container finished" podID="84ab6ec2-5733-4753-afa0-53e9437b7fc2" containerID="27f8da2919a54f7ea2b5169a4eb56cfa91d6aec6bfb2785ea9fb67e869d4fc30" exitCode=0 Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.808790 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67795cd9-bmgk8" event={"ID":"84ab6ec2-5733-4753-afa0-53e9437b7fc2","Type":"ContainerDied","Data":"27f8da2919a54f7ea2b5169a4eb56cfa91d6aec6bfb2785ea9fb67e869d4fc30"} Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.808826 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67795cd9-bmgk8" event={"ID":"84ab6ec2-5733-4753-afa0-53e9437b7fc2","Type":"ContainerDied","Data":"f96caef3e43ac88009a402a9248fce4478caaca26a7e5f03e901fc16bce45979"} Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.808852 4798 scope.go:117] "RemoveContainer" containerID="27f8da2919a54f7ea2b5169a4eb56cfa91d6aec6bfb2785ea9fb67e869d4fc30" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.808858 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67795cd9-bmgk8" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.811956 4798 generic.go:334] "Generic (PLEG): container finished" podID="d40dd511-9540-43b0-b019-9d58ecb9a4fe" containerID="c8d059d98465fae0ff674811da11fb9a7c1d6c703188ffda63d6088ba539b311" exitCode=0 Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.812095 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm" event={"ID":"d40dd511-9540-43b0-b019-9d58ecb9a4fe","Type":"ContainerDied","Data":"c8d059d98465fae0ff674811da11fb9a7c1d6c703188ffda63d6088ba539b311"} Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.812165 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm" event={"ID":"d40dd511-9540-43b0-b019-9d58ecb9a4fe","Type":"ContainerStarted","Data":"ffe0614ecc04f9b1d5edacd70c87c02091096cd912e047f25fb30ef0e6a2dd97"} Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.814149 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0020f7ba-0c70-4bcf-90a8-68729a98025c","Type":"ContainerStarted","Data":"f5adbe3bfcc9a4a4158383b37266b4322e7399f938013e2801ca7ac3049fb7fc"} Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.906928 4798 scope.go:117] "RemoveContainer" containerID="27f8da2919a54f7ea2b5169a4eb56cfa91d6aec6bfb2785ea9fb67e869d4fc30" Oct 11 04:11:21 crc kubenswrapper[4798]: E1011 04:11:21.908385 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27f8da2919a54f7ea2b5169a4eb56cfa91d6aec6bfb2785ea9fb67e869d4fc30\": container with ID starting with 27f8da2919a54f7ea2b5169a4eb56cfa91d6aec6bfb2785ea9fb67e869d4fc30 not found: ID does not exist" containerID="27f8da2919a54f7ea2b5169a4eb56cfa91d6aec6bfb2785ea9fb67e869d4fc30" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.908446 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27f8da2919a54f7ea2b5169a4eb56cfa91d6aec6bfb2785ea9fb67e869d4fc30"} err="failed to get container status \"27f8da2919a54f7ea2b5169a4eb56cfa91d6aec6bfb2785ea9fb67e869d4fc30\": rpc error: code = NotFound desc = could not find container \"27f8da2919a54f7ea2b5169a4eb56cfa91d6aec6bfb2785ea9fb67e869d4fc30\": container with ID starting with 27f8da2919a54f7ea2b5169a4eb56cfa91d6aec6bfb2785ea9fb67e869d4fc30 not found: ID does not exist" Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.915458 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67795cd9-bmgk8"] Oct 11 04:11:21 crc kubenswrapper[4798]: I1011 04:11:21.922597 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67795cd9-bmgk8"] Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.531851 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-2ltmb"] Oct 11 04:11:22 crc kubenswrapper[4798]: E1011 04:11:22.532817 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="482591d3-dd16-4a35-9c65-45ab2ad7d057" containerName="init" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.532837 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="482591d3-dd16-4a35-9c65-45ab2ad7d057" containerName="init" Oct 11 04:11:22 crc kubenswrapper[4798]: E1011 04:11:22.532869 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af88f788-8ce4-4cf3-b570-4cc85140cb48" containerName="mariadb-account-create" 
Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.532875 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="af88f788-8ce4-4cf3-b570-4cc85140cb48" containerName="mariadb-account-create" Oct 11 04:11:22 crc kubenswrapper[4798]: E1011 04:11:22.532894 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="482591d3-dd16-4a35-9c65-45ab2ad7d057" containerName="dnsmasq-dns" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.532901 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="482591d3-dd16-4a35-9c65-45ab2ad7d057" containerName="dnsmasq-dns" Oct 11 04:11:22 crc kubenswrapper[4798]: E1011 04:11:22.532915 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84ab6ec2-5733-4753-afa0-53e9437b7fc2" containerName="init" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.532920 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="84ab6ec2-5733-4753-afa0-53e9437b7fc2" containerName="init" Oct 11 04:11:22 crc kubenswrapper[4798]: E1011 04:11:22.532930 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08c72420-5d00-49d3-aabb-527a1e9c22fb" containerName="mariadb-account-create" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.532936 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="08c72420-5d00-49d3-aabb-527a1e9c22fb" containerName="mariadb-account-create" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.533097 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="af88f788-8ce4-4cf3-b570-4cc85140cb48" containerName="mariadb-account-create" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.533112 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="08c72420-5d00-49d3-aabb-527a1e9c22fb" containerName="mariadb-account-create" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.533125 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="84ab6ec2-5733-4753-afa0-53e9437b7fc2" containerName="init" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.533133 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="482591d3-dd16-4a35-9c65-45ab2ad7d057" containerName="dnsmasq-dns" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.535643 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-2ltmb" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.538823 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-cf94r" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.539095 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.552503 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-2ltmb"] Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.615701 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-btwq2"] Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.618425 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.621198 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-rwc92" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.621778 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.626218 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.634878 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-btwq2"] Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.678730 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7e46251-4b37-40c6-adbc-877857e4442b-etc-machine-id\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.678803 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-db-sync-config-data\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.678892 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-config-data\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.678937 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-scripts\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.678968 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tc445\" (UniqueName: \"kubernetes.io/projected/c7e46251-4b37-40c6-adbc-877857e4442b-kube-api-access-tc445\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.678993 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-824ld\" (UniqueName: \"kubernetes.io/projected/79172d81-dfa6-4863-afbe-1e8c8b622f6d-kube-api-access-824ld\") pod \"barbican-db-sync-2ltmb\" (UID: \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\") " pod="openstack/barbican-db-sync-2ltmb" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.679035 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/79172d81-dfa6-4863-afbe-1e8c8b622f6d-db-sync-config-data\") pod \"barbican-db-sync-2ltmb\" (UID: \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\") " pod="openstack/barbican-db-sync-2ltmb" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.679527 4798 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-combined-ca-bundle\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.683174 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79172d81-dfa6-4863-afbe-1e8c8b622f6d-combined-ca-bundle\") pod \"barbican-db-sync-2ltmb\" (UID: \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\") " pod="openstack/barbican-db-sync-2ltmb" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.785064 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79172d81-dfa6-4863-afbe-1e8c8b622f6d-combined-ca-bundle\") pod \"barbican-db-sync-2ltmb\" (UID: \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\") " pod="openstack/barbican-db-sync-2ltmb" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.785143 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7e46251-4b37-40c6-adbc-877857e4442b-etc-machine-id\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.785174 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-db-sync-config-data\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.785217 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-config-data\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.785247 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-scripts\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.785270 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tc445\" (UniqueName: \"kubernetes.io/projected/c7e46251-4b37-40c6-adbc-877857e4442b-kube-api-access-tc445\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.785290 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-824ld\" (UniqueName: \"kubernetes.io/projected/79172d81-dfa6-4863-afbe-1e8c8b622f6d-kube-api-access-824ld\") pod \"barbican-db-sync-2ltmb\" (UID: \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\") " pod="openstack/barbican-db-sync-2ltmb" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.785315 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/79172d81-dfa6-4863-afbe-1e8c8b622f6d-db-sync-config-data\") pod \"barbican-db-sync-2ltmb\" (UID: \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\") " pod="openstack/barbican-db-sync-2ltmb" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.785421 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-combined-ca-bundle\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.785642 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7e46251-4b37-40c6-adbc-877857e4442b-etc-machine-id\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.791710 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-scripts\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.792189 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-db-sync-config-data\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.792809 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-combined-ca-bundle\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.793930 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-config-data\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.803476 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/79172d81-dfa6-4863-afbe-1e8c8b622f6d-db-sync-config-data\") pod \"barbican-db-sync-2ltmb\" (UID: \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\") " pod="openstack/barbican-db-sync-2ltmb" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.803654 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79172d81-dfa6-4863-afbe-1e8c8b622f6d-combined-ca-bundle\") pod \"barbican-db-sync-2ltmb\" (UID: \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\") " pod="openstack/barbican-db-sync-2ltmb" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.804269 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-824ld\" (UniqueName: \"kubernetes.io/projected/79172d81-dfa6-4863-afbe-1e8c8b622f6d-kube-api-access-824ld\") pod \"barbican-db-sync-2ltmb\" (UID: \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\") " pod="openstack/barbican-db-sync-2ltmb" Oct 11 04:11:22 crc 
kubenswrapper[4798]: I1011 04:11:22.805179 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tc445\" (UniqueName: \"kubernetes.io/projected/c7e46251-4b37-40c6-adbc-877857e4442b-kube-api-access-tc445\") pod \"cinder-db-sync-btwq2\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") " pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.830306 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm" event={"ID":"d40dd511-9540-43b0-b019-9d58ecb9a4fe","Type":"ContainerStarted","Data":"b1b32daac8bca48b9ec69c30e43e9bd7135e20631e212757ee0d99e68e9b0639"} Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.830417 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.857077 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-2ltmb" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.865350 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm" podStartSLOduration=3.865325703 podStartE2EDuration="3.865325703s" podCreationTimestamp="2025-10-11 04:11:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:11:22.855138328 +0000 UTC m=+978.191428024" watchObservedRunningTime="2025-10-11 04:11:22.865325703 +0000 UTC m=+978.201615389" Oct 11 04:11:22 crc kubenswrapper[4798]: I1011 04:11:22.938791 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-btwq2" Oct 11 04:11:23 crc kubenswrapper[4798]: I1011 04:11:23.390069 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-2ltmb"] Oct 11 04:11:23 crc kubenswrapper[4798]: I1011 04:11:23.443274 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84ab6ec2-5733-4753-afa0-53e9437b7fc2" path="/var/lib/kubelet/pods/84ab6ec2-5733-4753-afa0-53e9437b7fc2/volumes" Oct 11 04:11:23 crc kubenswrapper[4798]: I1011 04:11:23.489342 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-btwq2"] Oct 11 04:11:23 crc kubenswrapper[4798]: I1011 04:11:23.857600 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-2ltmb" event={"ID":"79172d81-dfa6-4863-afbe-1e8c8b622f6d","Type":"ContainerStarted","Data":"1be0b9d4e17a4f1e52318173514b130751f2277bdd234fadb2d319de3dcadeee"} Oct 11 04:11:24 crc kubenswrapper[4798]: I1011 04:11:24.882015 4798 generic.go:334] "Generic (PLEG): container finished" podID="1e51ab81-3ca9-4cc2-8292-6a6e56d508f2" containerID="4a255db8dd7d1dc2df8990b631f417baf82fd680863e71a683d0e88e8a4b6e89" exitCode=0 Oct 11 04:11:24 crc kubenswrapper[4798]: I1011 04:11:24.882091 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qf994" event={"ID":"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2","Type":"ContainerDied","Data":"4a255db8dd7d1dc2df8990b631f417baf82fd680863e71a683d0e88e8a4b6e89"} Oct 11 04:11:25 crc kubenswrapper[4798]: I1011 04:11:25.895141 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-btwq2" event={"ID":"c7e46251-4b37-40c6-adbc-877857e4442b","Type":"ContainerStarted","Data":"077b33da092fa80665f7057ef42f321a5410358cd1f618dee0127da83b588155"} Oct 11 04:11:27 crc 
kubenswrapper[4798]: I1011 04:11:27.288129 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-01e2-account-create-d9hs4"] Oct 11 04:11:27 crc kubenswrapper[4798]: I1011 04:11:27.291368 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-01e2-account-create-d9hs4" Oct 11 04:11:27 crc kubenswrapper[4798]: I1011 04:11:27.295817 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Oct 11 04:11:27 crc kubenswrapper[4798]: I1011 04:11:27.306998 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-01e2-account-create-d9hs4"] Oct 11 04:11:27 crc kubenswrapper[4798]: I1011 04:11:27.401138 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mf6lp\" (UniqueName: \"kubernetes.io/projected/dea5f749-0ae2-4385-b9fd-675a6d651cd8-kube-api-access-mf6lp\") pod \"neutron-01e2-account-create-d9hs4\" (UID: \"dea5f749-0ae2-4385-b9fd-675a6d651cd8\") " pod="openstack/neutron-01e2-account-create-d9hs4" Oct 11 04:11:27 crc kubenswrapper[4798]: I1011 04:11:27.502927 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mf6lp\" (UniqueName: \"kubernetes.io/projected/dea5f749-0ae2-4385-b9fd-675a6d651cd8-kube-api-access-mf6lp\") pod \"neutron-01e2-account-create-d9hs4\" (UID: \"dea5f749-0ae2-4385-b9fd-675a6d651cd8\") " pod="openstack/neutron-01e2-account-create-d9hs4" Oct 11 04:11:27 crc kubenswrapper[4798]: I1011 04:11:27.530138 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mf6lp\" (UniqueName: \"kubernetes.io/projected/dea5f749-0ae2-4385-b9fd-675a6d651cd8-kube-api-access-mf6lp\") pod \"neutron-01e2-account-create-d9hs4\" (UID: \"dea5f749-0ae2-4385-b9fd-675a6d651cd8\") " pod="openstack/neutron-01e2-account-create-d9hs4" Oct 11 04:11:27 crc kubenswrapper[4798]: I1011 04:11:27.618608 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-01e2-account-create-d9hs4" Oct 11 04:11:27 crc kubenswrapper[4798]: I1011 04:11:27.922875 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:27 crc kubenswrapper[4798]: I1011 04:11:27.928367 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qf994" event={"ID":"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2","Type":"ContainerDied","Data":"826b4c3bf0319d0aa4255d60e1bb59d02494eedd975003de41b03edbbad5b6a3"} Oct 11 04:11:27 crc kubenswrapper[4798]: I1011 04:11:27.928459 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="826b4c3bf0319d0aa4255d60e1bb59d02494eedd975003de41b03edbbad5b6a3" Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.013867 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rp8t7\" (UniqueName: \"kubernetes.io/projected/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-kube-api-access-rp8t7\") pod \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.014446 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-scripts\") pod \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.014977 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-credential-keys\") pod \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.015095 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-fernet-keys\") pod \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.015218 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-combined-ca-bundle\") pod \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.015298 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-config-data\") pod \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\" (UID: \"1e51ab81-3ca9-4cc2-8292-6a6e56d508f2\") " Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.019861 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-scripts" (OuterVolumeSpecName: "scripts") pod "1e51ab81-3ca9-4cc2-8292-6a6e56d508f2" (UID: "1e51ab81-3ca9-4cc2-8292-6a6e56d508f2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.020947 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "1e51ab81-3ca9-4cc2-8292-6a6e56d508f2" (UID: "1e51ab81-3ca9-4cc2-8292-6a6e56d508f2"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.022320 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "1e51ab81-3ca9-4cc2-8292-6a6e56d508f2" (UID: "1e51ab81-3ca9-4cc2-8292-6a6e56d508f2"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.024126 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-kube-api-access-rp8t7" (OuterVolumeSpecName: "kube-api-access-rp8t7") pod "1e51ab81-3ca9-4cc2-8292-6a6e56d508f2" (UID: "1e51ab81-3ca9-4cc2-8292-6a6e56d508f2"). InnerVolumeSpecName "kube-api-access-rp8t7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.045414 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e51ab81-3ca9-4cc2-8292-6a6e56d508f2" (UID: "1e51ab81-3ca9-4cc2-8292-6a6e56d508f2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.069048 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-config-data" (OuterVolumeSpecName: "config-data") pod "1e51ab81-3ca9-4cc2-8292-6a6e56d508f2" (UID: "1e51ab81-3ca9-4cc2-8292-6a6e56d508f2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.117596 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.117638 4798 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-credential-keys\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.117650 4798 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.117659 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.117669 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.117679 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rp8t7\" (UniqueName: \"kubernetes.io/projected/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2-kube-api-access-rp8t7\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:28 crc kubenswrapper[4798]: W1011 04:11:28.290706 4798 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddea5f749_0ae2_4385_b9fd_675a6d651cd8.slice/crio-638bc47a449f2199579ba2982426e922047b3fd31b804f006f9b36d0f7e320dc WatchSource:0}: Error finding container 638bc47a449f2199579ba2982426e922047b3fd31b804f006f9b36d0f7e320dc: Status 404 returned error can't find the container with id 638bc47a449f2199579ba2982426e922047b3fd31b804f006f9b36d0f7e320dc Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.291675 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-01e2-account-create-d9hs4"] Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.941492 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-pjqvn" event={"ID":"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd","Type":"ContainerStarted","Data":"7197dc6f8173a46b362dedb05e974a07ce4ae4188dc3c4a11df371869b498fbc"} Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.943640 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0020f7ba-0c70-4bcf-90a8-68729a98025c","Type":"ContainerStarted","Data":"71a71593f08fe793927ee7efa2ee7dde883b2df0ced200ba8c2bb87304a7e14b"} Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.949174 4798 generic.go:334] "Generic (PLEG): container finished" podID="dea5f749-0ae2-4385-b9fd-675a6d651cd8" containerID="d7b534cde5c58a64218ca1e17bf4d02a4350cb00ef91cce26941c9cdb2660a63" exitCode=0 Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.949352 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qf994" Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.954339 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-01e2-account-create-d9hs4" event={"ID":"dea5f749-0ae2-4385-b9fd-675a6d651cd8","Type":"ContainerDied","Data":"d7b534cde5c58a64218ca1e17bf4d02a4350cb00ef91cce26941c9cdb2660a63"} Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.954428 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-01e2-account-create-d9hs4" event={"ID":"dea5f749-0ae2-4385-b9fd-675a6d651cd8","Type":"ContainerStarted","Data":"638bc47a449f2199579ba2982426e922047b3fd31b804f006f9b36d0f7e320dc"} Oct 11 04:11:28 crc kubenswrapper[4798]: I1011 04:11:28.991504 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-pjqvn" podStartSLOduration=2.8289632620000003 podStartE2EDuration="9.991482443s" podCreationTimestamp="2025-10-11 04:11:19 +0000 UTC" firstStartedPulling="2025-10-11 04:11:20.625701097 +0000 UTC m=+975.961990783" lastFinishedPulling="2025-10-11 04:11:27.788220278 +0000 UTC m=+983.124509964" observedRunningTime="2025-10-11 04:11:28.962427888 +0000 UTC m=+984.298717574" watchObservedRunningTime="2025-10-11 04:11:28.991482443 +0000 UTC m=+984.327772129" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.032717 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-qf994"] Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.040684 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-qf994"] Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.129518 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-phfnr"] Oct 11 04:11:29 crc kubenswrapper[4798]: E1011 04:11:29.130777 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e51ab81-3ca9-4cc2-8292-6a6e56d508f2" 
containerName="keystone-bootstrap" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.130805 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e51ab81-3ca9-4cc2-8292-6a6e56d508f2" containerName="keystone-bootstrap" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.135405 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e51ab81-3ca9-4cc2-8292-6a6e56d508f2" containerName="keystone-bootstrap" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.137644 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.142466 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-768mh" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.143308 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.143604 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.144156 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-phfnr"] Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.146586 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.241511 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-combined-ca-bundle\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.241573 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-scripts\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.241627 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-fernet-keys\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.241804 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8k2w\" (UniqueName: \"kubernetes.io/projected/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-kube-api-access-w8k2w\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.241933 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-config-data\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.241963 4798 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-credential-keys\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.343517 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-scripts\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.343579 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-combined-ca-bundle\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.343633 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-fernet-keys\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.343680 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8k2w\" (UniqueName: \"kubernetes.io/projected/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-kube-api-access-w8k2w\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.343729 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-config-data\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.343756 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-credential-keys\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.352057 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-combined-ca-bundle\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.352679 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-config-data\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.353169 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-fernet-keys\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.357493 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-credential-keys\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.361030 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-scripts\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.363706 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8k2w\" (UniqueName: \"kubernetes.io/projected/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-kube-api-access-w8k2w\") pod \"keystone-bootstrap-phfnr\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") " pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.445704 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e51ab81-3ca9-4cc2-8292-6a6e56d508f2" path="/var/lib/kubelet/pods/1e51ab81-3ca9-4cc2-8292-6a6e56d508f2/volumes" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.468737 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-phfnr" Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.967461 4798 generic.go:334] "Generic (PLEG): container finished" podID="00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd" containerID="7197dc6f8173a46b362dedb05e974a07ce4ae4188dc3c4a11df371869b498fbc" exitCode=0 Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.967583 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-pjqvn" event={"ID":"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd","Type":"ContainerDied","Data":"7197dc6f8173a46b362dedb05e974a07ce4ae4188dc3c4a11df371869b498fbc"} Oct 11 04:11:29 crc kubenswrapper[4798]: I1011 04:11:29.988931 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm" Oct 11 04:11:30 crc kubenswrapper[4798]: I1011 04:11:30.068886 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-lhhsb"] Oct 11 04:11:30 crc kubenswrapper[4798]: I1011 04:11:30.069206 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-lhhsb" podUID="8d972df7-8eff-4cc6-92a1-799f816bc198" containerName="dnsmasq-dns" containerID="cri-o://66e5712c05c3d0e3528cefaa70d49785b5d3d6f65c1e53b617ed5fffc130df03" gracePeriod=10 Oct 11 04:11:30 crc kubenswrapper[4798]: E1011 04:11:30.210838 4798 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d972df7_8eff_4cc6_92a1_799f816bc198.slice/crio-66e5712c05c3d0e3528cefaa70d49785b5d3d6f65c1e53b617ed5fffc130df03.scope\": RecentStats: unable to find data in memory cache]" Oct 11 04:11:30 crc kubenswrapper[4798]: I1011 04:11:30.981146 4798 generic.go:334] "Generic (PLEG): container finished" 
podID="8d972df7-8eff-4cc6-92a1-799f816bc198" containerID="66e5712c05c3d0e3528cefaa70d49785b5d3d6f65c1e53b617ed5fffc130df03" exitCode=0 Oct 11 04:11:30 crc kubenswrapper[4798]: I1011 04:11:30.981228 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-lhhsb" event={"ID":"8d972df7-8eff-4cc6-92a1-799f816bc198","Type":"ContainerDied","Data":"66e5712c05c3d0e3528cefaa70d49785b5d3d6f65c1e53b617ed5fffc130df03"} Oct 11 04:11:31 crc kubenswrapper[4798]: I1011 04:11:31.136339 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8554648995-lhhsb" podUID="8d972df7-8eff-4cc6-92a1-799f816bc198" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.113:5353: connect: connection refused" Oct 11 04:11:32 crc kubenswrapper[4798]: I1011 04:11:32.348006 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-01e2-account-create-d9hs4" Oct 11 04:11:32 crc kubenswrapper[4798]: I1011 04:11:32.381893 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-pjqvn" Oct 11 04:11:32 crc kubenswrapper[4798]: I1011 04:11:32.453240 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mf6lp\" (UniqueName: \"kubernetes.io/projected/dea5f749-0ae2-4385-b9fd-675a6d651cd8-kube-api-access-mf6lp\") pod \"dea5f749-0ae2-4385-b9fd-675a6d651cd8\" (UID: \"dea5f749-0ae2-4385-b9fd-675a6d651cd8\") " Oct 11 04:11:32 crc kubenswrapper[4798]: I1011 04:11:32.459965 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dea5f749-0ae2-4385-b9fd-675a6d651cd8-kube-api-access-mf6lp" (OuterVolumeSpecName: "kube-api-access-mf6lp") pod "dea5f749-0ae2-4385-b9fd-675a6d651cd8" (UID: "dea5f749-0ae2-4385-b9fd-675a6d651cd8"). InnerVolumeSpecName "kube-api-access-mf6lp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.554583 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-scripts\") pod \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.556755 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.557951 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-config-data\") pod \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.558035 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qz2js\" (UniqueName: \"kubernetes.io/projected/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-kube-api-access-qz2js\") pod \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.558097 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-combined-ca-bundle\") pod \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.558126 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-logs\") pod \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\" (UID: \"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd\") " Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.558922 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mf6lp\" (UniqueName: \"kubernetes.io/projected/dea5f749-0ae2-4385-b9fd-675a6d651cd8-kube-api-access-mf6lp\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.559658 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-scripts" (OuterVolumeSpecName: "scripts") pod "00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd" (UID: "00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.559739 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-logs" (OuterVolumeSpecName: "logs") pod "00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd" (UID: "00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.561602 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-kube-api-access-qz2js" (OuterVolumeSpecName: "kube-api-access-qz2js") pod "00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd" (UID: "00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd"). InnerVolumeSpecName "kube-api-access-qz2js". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.593531 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-config-data" (OuterVolumeSpecName: "config-data") pod "00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd" (UID: "00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.606684 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd" (UID: "00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.661237 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6bss\" (UniqueName: \"kubernetes.io/projected/8d972df7-8eff-4cc6-92a1-799f816bc198-kube-api-access-w6bss\") pod \"8d972df7-8eff-4cc6-92a1-799f816bc198\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.661408 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-ovsdbserver-nb\") pod \"8d972df7-8eff-4cc6-92a1-799f816bc198\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.661438 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-ovsdbserver-sb\") pod \"8d972df7-8eff-4cc6-92a1-799f816bc198\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.661485 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-dns-svc\") pod \"8d972df7-8eff-4cc6-92a1-799f816bc198\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.661512 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-config\") pod \"8d972df7-8eff-4cc6-92a1-799f816bc198\" (UID: \"8d972df7-8eff-4cc6-92a1-799f816bc198\") " Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.661830 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.661844 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qz2js\" (UniqueName: \"kubernetes.io/projected/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-kube-api-access-qz2js\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.661858 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.661870 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.661884 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd-logs\") on node \"crc\" 
DevicePath \"\"" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.666535 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d972df7-8eff-4cc6-92a1-799f816bc198-kube-api-access-w6bss" (OuterVolumeSpecName: "kube-api-access-w6bss") pod "8d972df7-8eff-4cc6-92a1-799f816bc198" (UID: "8d972df7-8eff-4cc6-92a1-799f816bc198"). InnerVolumeSpecName "kube-api-access-w6bss". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.703999 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8d972df7-8eff-4cc6-92a1-799f816bc198" (UID: "8d972df7-8eff-4cc6-92a1-799f816bc198"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.704040 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-config" (OuterVolumeSpecName: "config") pod "8d972df7-8eff-4cc6-92a1-799f816bc198" (UID: "8d972df7-8eff-4cc6-92a1-799f816bc198"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.705704 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8d972df7-8eff-4cc6-92a1-799f816bc198" (UID: "8d972df7-8eff-4cc6-92a1-799f816bc198"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.706172 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8d972df7-8eff-4cc6-92a1-799f816bc198" (UID: "8d972df7-8eff-4cc6-92a1-799f816bc198"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.753749 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-phfnr"] Oct 11 04:11:33 crc kubenswrapper[4798]: W1011 04:11:32.760065 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd696f8bc_027f_43c0_b58d_72ea3f3f9c5c.slice/crio-9906d918edfc9cb5290f15626d21fc3d636b33e5f70c8106c79056c2ecd9199b WatchSource:0}: Error finding container 9906d918edfc9cb5290f15626d21fc3d636b33e5f70c8106c79056c2ecd9199b: Status 404 returned error can't find the container with id 9906d918edfc9cb5290f15626d21fc3d636b33e5f70c8106c79056c2ecd9199b Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.762604 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6bss\" (UniqueName: \"kubernetes.io/projected/8d972df7-8eff-4cc6-92a1-799f816bc198-kube-api-access-w6bss\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.762624 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.763378 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.763412 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:32.763420 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d972df7-8eff-4cc6-92a1-799f816bc198-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.000958 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0020f7ba-0c70-4bcf-90a8-68729a98025c","Type":"ContainerStarted","Data":"81b3e9df75092a3efda3ab28bcc5bdcc08af56888948c5568fc5e4c04810cc9f"} Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.003942 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-lhhsb" event={"ID":"8d972df7-8eff-4cc6-92a1-799f816bc198","Type":"ContainerDied","Data":"0d4fc09c63066d217198448312807a70d2c12164108b7e789978564067be8c26"} Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.003989 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-lhhsb" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.004002 4798 scope.go:117] "RemoveContainer" containerID="66e5712c05c3d0e3528cefaa70d49785b5d3d6f65c1e53b617ed5fffc130df03" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.019525 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-01e2-account-create-d9hs4" event={"ID":"dea5f749-0ae2-4385-b9fd-675a6d651cd8","Type":"ContainerDied","Data":"638bc47a449f2199579ba2982426e922047b3fd31b804f006f9b36d0f7e320dc"} Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.019552 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-01e2-account-create-d9hs4" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.019572 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="638bc47a449f2199579ba2982426e922047b3fd31b804f006f9b36d0f7e320dc" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.025102 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-2ltmb" event={"ID":"79172d81-dfa6-4863-afbe-1e8c8b622f6d","Type":"ContainerStarted","Data":"bf1751f30a0b27a52a548cb5acab60ae68ed52dad84d483157ab0932d7862973"} Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.032788 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-phfnr" event={"ID":"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c","Type":"ContainerStarted","Data":"7e4683a342af7fb86273eb4c55f7d8fc13cd540c7a4d7a8535f881cb6e4fc355"} Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.032868 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-phfnr" event={"ID":"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c","Type":"ContainerStarted","Data":"9906d918edfc9cb5290f15626d21fc3d636b33e5f70c8106c79056c2ecd9199b"} Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.038225 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-pjqvn" event={"ID":"00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd","Type":"ContainerDied","Data":"0a564841764620effddf7b1fbab2010473c92cfca8d789aaeaa785825e16d03c"} Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.039525 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a564841764620effddf7b1fbab2010473c92cfca8d789aaeaa785825e16d03c" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.039595 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-pjqvn" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.044555 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-2ltmb" podStartSLOduration=2.187647802 podStartE2EDuration="11.044537925s" podCreationTimestamp="2025-10-11 04:11:22 +0000 UTC" firstStartedPulling="2025-10-11 04:11:23.401600359 +0000 UTC m=+978.737890045" lastFinishedPulling="2025-10-11 04:11:32.258490482 +0000 UTC m=+987.594780168" observedRunningTime="2025-10-11 04:11:33.043701144 +0000 UTC m=+988.379990830" watchObservedRunningTime="2025-10-11 04:11:33.044537925 +0000 UTC m=+988.380827611" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.065454 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-phfnr" podStartSLOduration=4.065434284 podStartE2EDuration="4.065434284s" podCreationTimestamp="2025-10-11 04:11:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:11:33.05899998 +0000 UTC m=+988.395289666" watchObservedRunningTime="2025-10-11 04:11:33.065434284 +0000 UTC m=+988.401723970" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.087371 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-lhhsb"] Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.093477 4798 scope.go:117] "RemoveContainer" containerID="48cb99dc4a4d9bef61b230298e4a87710b18b1ba49e0ab215ec5c1c6bc4f7963" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.095792 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-lhhsb"] Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.451112 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d972df7-8eff-4cc6-92a1-799f816bc198" path="/var/lib/kubelet/pods/8d972df7-8eff-4cc6-92a1-799f816bc198/volumes" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.485027 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-c67f5f784-ph78z"] Oct 11 04:11:33 crc kubenswrapper[4798]: E1011 04:11:33.485378 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d972df7-8eff-4cc6-92a1-799f816bc198" containerName="init" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.485411 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d972df7-8eff-4cc6-92a1-799f816bc198" containerName="init" Oct 11 04:11:33 crc kubenswrapper[4798]: E1011 04:11:33.485445 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d972df7-8eff-4cc6-92a1-799f816bc198" containerName="dnsmasq-dns" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.485452 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d972df7-8eff-4cc6-92a1-799f816bc198" containerName="dnsmasq-dns" Oct 11 04:11:33 crc kubenswrapper[4798]: E1011 04:11:33.485469 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd" containerName="placement-db-sync" Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.485475 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd" containerName="placement-db-sync" Oct 11 04:11:33 crc kubenswrapper[4798]: E1011 04:11:33.485486 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dea5f749-0ae2-4385-b9fd-675a6d651cd8" containerName="mariadb-account-create" Oct 11 04:11:33 
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.485492 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="dea5f749-0ae2-4385-b9fd-675a6d651cd8" containerName="mariadb-account-create"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.486247 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d972df7-8eff-4cc6-92a1-799f816bc198" containerName="dnsmasq-dns"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.486268 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd" containerName="placement-db-sync"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.486278 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="dea5f749-0ae2-4385-b9fd-675a6d651cd8" containerName="mariadb-account-create"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.492775 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.497455 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-v94bf"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.497690 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.500014 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.501041 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-c67f5f784-ph78z"]
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.501129 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.501176 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.584422 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-public-tls-certs\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.584516 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0843bdb2-fc67-4c61-991e-383ebdb67136-logs\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.584545 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlw4z\" (UniqueName: \"kubernetes.io/projected/0843bdb2-fc67-4c61-991e-383ebdb67136-kube-api-access-hlw4z\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.584658 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-config-data\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.584690 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-internal-tls-certs\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.584718 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-combined-ca-bundle\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.585021 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-scripts\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.690637 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-config-data\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.690704 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-internal-tls-certs\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.690747 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-combined-ca-bundle\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.690872 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-scripts\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.690905 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-public-tls-certs\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.690986 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0843bdb2-fc67-4c61-991e-383ebdb67136-logs\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.691021 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlw4z\" (UniqueName: \"kubernetes.io/projected/0843bdb2-fc67-4c61-991e-383ebdb67136-kube-api-access-hlw4z\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.692621 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0843bdb2-fc67-4c61-991e-383ebdb67136-logs\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.698461 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-scripts\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.698559 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-public-tls-certs\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.698868 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-config-data\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.714625 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-internal-tls-certs\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.716464 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0843bdb2-fc67-4c61-991e-383ebdb67136-combined-ca-bundle\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.720768 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlw4z\" (UniqueName: \"kubernetes.io/projected/0843bdb2-fc67-4c61-991e-383ebdb67136-kube-api-access-hlw4z\") pod \"placement-c67f5f784-ph78z\" (UID: \"0843bdb2-fc67-4c61-991e-383ebdb67136\") " pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:33 crc kubenswrapper[4798]: I1011 04:11:33.819324 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:34 crc kubenswrapper[4798]: I1011 04:11:34.345256 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-c67f5f784-ph78z"]
Oct 11 04:11:35 crc kubenswrapper[4798]: I1011 04:11:35.065828 4798 generic.go:334] "Generic (PLEG): container finished" podID="79172d81-dfa6-4863-afbe-1e8c8b622f6d" containerID="bf1751f30a0b27a52a548cb5acab60ae68ed52dad84d483157ab0932d7862973" exitCode=0
Oct 11 04:11:35 crc kubenswrapper[4798]: I1011 04:11:35.065925 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-2ltmb" event={"ID":"79172d81-dfa6-4863-afbe-1e8c8b622f6d","Type":"ContainerDied","Data":"bf1751f30a0b27a52a548cb5acab60ae68ed52dad84d483157ab0932d7862973"}
Oct 11 04:11:36 crc kubenswrapper[4798]: I1011 04:11:36.077429 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-phfnr" event={"ID":"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c","Type":"ContainerDied","Data":"7e4683a342af7fb86273eb4c55f7d8fc13cd540c7a4d7a8535f881cb6e4fc355"}
Oct 11 04:11:36 crc kubenswrapper[4798]: I1011 04:11:36.077416 4798 generic.go:334] "Generic (PLEG): container finished" podID="d696f8bc-027f-43c0-b58d-72ea3f3f9c5c" containerID="7e4683a342af7fb86273eb4c55f7d8fc13cd540c7a4d7a8535f881cb6e4fc355" exitCode=0
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.555066 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-f5wn5"]
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.557756 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-f5wn5"
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.561825 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.562500 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-4q929"
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.562621 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.564092 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-f5wn5"]
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.609067 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1afeba46-c2cc-455e-ab80-56f553c9e4cb-combined-ca-bundle\") pod \"neutron-db-sync-f5wn5\" (UID: \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\") " pod="openstack/neutron-db-sync-f5wn5"
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.609119 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1afeba46-c2cc-455e-ab80-56f553c9e4cb-config\") pod \"neutron-db-sync-f5wn5\" (UID: \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\") " pod="openstack/neutron-db-sync-f5wn5"
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.609547 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psdsj\" (UniqueName: \"kubernetes.io/projected/1afeba46-c2cc-455e-ab80-56f553c9e4cb-kube-api-access-psdsj\") pod \"neutron-db-sync-f5wn5\" (UID: \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\") " pod="openstack/neutron-db-sync-f5wn5"
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.711823 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psdsj\" (UniqueName: \"kubernetes.io/projected/1afeba46-c2cc-455e-ab80-56f553c9e4cb-kube-api-access-psdsj\") pod \"neutron-db-sync-f5wn5\" (UID: \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\") " pod="openstack/neutron-db-sync-f5wn5"
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.711929 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1afeba46-c2cc-455e-ab80-56f553c9e4cb-combined-ca-bundle\") pod \"neutron-db-sync-f5wn5\" (UID: \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\") " pod="openstack/neutron-db-sync-f5wn5"
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.712011 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1afeba46-c2cc-455e-ab80-56f553c9e4cb-config\") pod \"neutron-db-sync-f5wn5\" (UID: \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\") " pod="openstack/neutron-db-sync-f5wn5"
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.723489 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1afeba46-c2cc-455e-ab80-56f553c9e4cb-config\") pod \"neutron-db-sync-f5wn5\" (UID: \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\") " pod="openstack/neutron-db-sync-f5wn5"
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.724600 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1afeba46-c2cc-455e-ab80-56f553c9e4cb-combined-ca-bundle\") pod \"neutron-db-sync-f5wn5\" (UID: \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\") " pod="openstack/neutron-db-sync-f5wn5"
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.728217 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psdsj\" (UniqueName: \"kubernetes.io/projected/1afeba46-c2cc-455e-ab80-56f553c9e4cb-kube-api-access-psdsj\") pod \"neutron-db-sync-f5wn5\" (UID: \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\") " pod="openstack/neutron-db-sync-f5wn5"
Oct 11 04:11:37 crc kubenswrapper[4798]: I1011 04:11:37.887074 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-f5wn5"
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.427445 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-phfnr"
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.440994 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-config-data\") pod \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") "
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.441192 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-combined-ca-bundle\") pod \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") "
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.441231 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8k2w\" (UniqueName: \"kubernetes.io/projected/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-kube-api-access-w8k2w\") pod \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") "
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.441295 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-fernet-keys\") pod \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") "
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.441356 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-credential-keys\") pod \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") "
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.442526 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-scripts\") pod \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\" (UID: \"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c\") "
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.459861 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d696f8bc-027f-43c0-b58d-72ea3f3f9c5c" (UID: "d696f8bc-027f-43c0-b58d-72ea3f3f9c5c"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.460729 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "d696f8bc-027f-43c0-b58d-72ea3f3f9c5c" (UID: "d696f8bc-027f-43c0-b58d-72ea3f3f9c5c"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.463141 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-kube-api-access-w8k2w" (OuterVolumeSpecName: "kube-api-access-w8k2w") pod "d696f8bc-027f-43c0-b58d-72ea3f3f9c5c" (UID: "d696f8bc-027f-43c0-b58d-72ea3f3f9c5c"). InnerVolumeSpecName "kube-api-access-w8k2w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.463549 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-scripts" (OuterVolumeSpecName: "scripts") pod "d696f8bc-027f-43c0-b58d-72ea3f3f9c5c" (UID: "d696f8bc-027f-43c0-b58d-72ea3f3f9c5c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.486527 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d696f8bc-027f-43c0-b58d-72ea3f3f9c5c" (UID: "d696f8bc-027f-43c0-b58d-72ea3f3f9c5c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.498615 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-config-data" (OuterVolumeSpecName: "config-data") pod "d696f8bc-027f-43c0-b58d-72ea3f3f9c5c" (UID: "d696f8bc-027f-43c0-b58d-72ea3f3f9c5c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.544603 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.545072 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8k2w\" (UniqueName: \"kubernetes.io/projected/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-kube-api-access-w8k2w\") on node \"crc\" DevicePath \"\""
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.545086 4798 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-fernet-keys\") on node \"crc\" DevicePath \"\""
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.545096 4798 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-credential-keys\") on node \"crc\" DevicePath \"\""
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.545105 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 04:11:43 crc kubenswrapper[4798]: I1011 04:11:43.545113 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.150717 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-phfnr" event={"ID":"d696f8bc-027f-43c0-b58d-72ea3f3f9c5c","Type":"ContainerDied","Data":"9906d918edfc9cb5290f15626d21fc3d636b33e5f70c8106c79056c2ecd9199b"}
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.150775 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9906d918edfc9cb5290f15626d21fc3d636b33e5f70c8106c79056c2ecd9199b"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.150810 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-phfnr"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.541547 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bf585fdff-grnph"]
Oct 11 04:11:44 crc kubenswrapper[4798]: E1011 04:11:44.541957 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d696f8bc-027f-43c0-b58d-72ea3f3f9c5c" containerName="keystone-bootstrap"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.541973 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="d696f8bc-027f-43c0-b58d-72ea3f3f9c5c" containerName="keystone-bootstrap"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.542192 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="d696f8bc-027f-43c0-b58d-72ea3f3f9c5c" containerName="keystone-bootstrap"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.542953 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.545528 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.546236 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.550182 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.550433 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.550602 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-768mh"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.553064 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bf585fdff-grnph"]
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.556938 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.563512 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-internal-tls-certs\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.563582 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-credential-keys\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.563609 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-scripts\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.563654 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-config-data\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.563675 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-combined-ca-bundle\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.563720 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-public-tls-certs\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.563747 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-fernet-keys\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.563801 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9j2x9\" (UniqueName: \"kubernetes.io/projected/0d75e3b2-e53b-4823-9272-b7038c8b379c-kube-api-access-9j2x9\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.666038 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-internal-tls-certs\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.666110 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-credential-keys\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.666142 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-scripts\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.666196 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-config-data\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.666220 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-combined-ca-bundle\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.666274 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-public-tls-certs\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.666308 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-fernet-keys\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.666381 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9j2x9\" (UniqueName: \"kubernetes.io/projected/0d75e3b2-e53b-4823-9272-b7038c8b379c-kube-api-access-9j2x9\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.670359 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-internal-tls-certs\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.670675 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-combined-ca-bundle\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.671023 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-public-tls-certs\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.672114 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-config-data\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.672289 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-scripts\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.672643 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-credential-keys\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") "
pod="openstack/keystone-bf585fdff-grnph" Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.678097 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0d75e3b2-e53b-4823-9272-b7038c8b379c-fernet-keys\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph" Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.690832 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9j2x9\" (UniqueName: \"kubernetes.io/projected/0d75e3b2-e53b-4823-9272-b7038c8b379c-kube-api-access-9j2x9\") pod \"keystone-bf585fdff-grnph\" (UID: \"0d75e3b2-e53b-4823-9272-b7038c8b379c\") " pod="openstack/keystone-bf585fdff-grnph" Oct 11 04:11:44 crc kubenswrapper[4798]: I1011 04:11:44.863697 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bf585fdff-grnph" Oct 11 04:11:45 crc kubenswrapper[4798]: W1011 04:11:45.633303 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0843bdb2_fc67_4c61_991e_383ebdb67136.slice/crio-6424465405bbc6292b3c97bd413258ec81dc37a4010a5c4d625f91106fdc05fe WatchSource:0}: Error finding container 6424465405bbc6292b3c97bd413258ec81dc37a4010a5c4d625f91106fdc05fe: Status 404 returned error can't find the container with id 6424465405bbc6292b3c97bd413258ec81dc37a4010a5c4d625f91106fdc05fe Oct 11 04:11:45 crc kubenswrapper[4798]: E1011 04:11:45.713099 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Oct 11 04:11:45 crc kubenswrapper[4798]: E1011 04:11:45.713308 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tc445,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-btwq2_openstack(c7e46251-4b37-40c6-adbc-877857e4442b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 11 04:11:45 crc kubenswrapper[4798]: E1011 04:11:45.714620 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-btwq2" podUID="c7e46251-4b37-40c6-adbc-877857e4442b" Oct 11 04:11:45 crc kubenswrapper[4798]: I1011 04:11:45.717426 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-2ltmb" Oct 11 04:11:45 crc kubenswrapper[4798]: I1011 04:11:45.799683 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/79172d81-dfa6-4863-afbe-1e8c8b622f6d-db-sync-config-data\") pod \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\" (UID: \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\") " Oct 11 04:11:45 crc kubenswrapper[4798]: I1011 04:11:45.799847 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-824ld\" (UniqueName: \"kubernetes.io/projected/79172d81-dfa6-4863-afbe-1e8c8b622f6d-kube-api-access-824ld\") pod \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\" (UID: \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\") " Oct 11 04:11:45 crc kubenswrapper[4798]: I1011 04:11:45.800014 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79172d81-dfa6-4863-afbe-1e8c8b622f6d-combined-ca-bundle\") pod \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\" (UID: \"79172d81-dfa6-4863-afbe-1e8c8b622f6d\") " Oct 11 04:11:45 crc kubenswrapper[4798]: I1011 04:11:45.829542 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79172d81-dfa6-4863-afbe-1e8c8b622f6d-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "79172d81-dfa6-4863-afbe-1e8c8b622f6d" (UID: "79172d81-dfa6-4863-afbe-1e8c8b622f6d"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:45 crc kubenswrapper[4798]: I1011 04:11:45.830473 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79172d81-dfa6-4863-afbe-1e8c8b622f6d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "79172d81-dfa6-4863-afbe-1e8c8b622f6d" (UID: "79172d81-dfa6-4863-afbe-1e8c8b622f6d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:11:45 crc kubenswrapper[4798]: I1011 04:11:45.831235 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79172d81-dfa6-4863-afbe-1e8c8b622f6d-kube-api-access-824ld" (OuterVolumeSpecName: "kube-api-access-824ld") pod "79172d81-dfa6-4863-afbe-1e8c8b622f6d" (UID: "79172d81-dfa6-4863-afbe-1e8c8b622f6d"). InnerVolumeSpecName "kube-api-access-824ld". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:11:45 crc kubenswrapper[4798]: I1011 04:11:45.909056 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-824ld\" (UniqueName: \"kubernetes.io/projected/79172d81-dfa6-4863-afbe-1e8c8b622f6d-kube-api-access-824ld\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:45 crc kubenswrapper[4798]: I1011 04:11:45.909095 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79172d81-dfa6-4863-afbe-1e8c8b622f6d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:45 crc kubenswrapper[4798]: I1011 04:11:45.909105 4798 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/79172d81-dfa6-4863-afbe-1e8c8b622f6d-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:11:46 crc kubenswrapper[4798]: I1011 04:11:46.171306 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c67f5f784-ph78z" event={"ID":"0843bdb2-fc67-4c61-991e-383ebdb67136","Type":"ContainerStarted","Data":"6424465405bbc6292b3c97bd413258ec81dc37a4010a5c4d625f91106fdc05fe"} Oct 11 04:11:46 crc kubenswrapper[4798]: I1011 04:11:46.174209 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-2ltmb" event={"ID":"79172d81-dfa6-4863-afbe-1e8c8b622f6d","Type":"ContainerDied","Data":"1be0b9d4e17a4f1e52318173514b130751f2277bdd234fadb2d319de3dcadeee"} Oct 11 04:11:46 crc kubenswrapper[4798]: I1011 04:11:46.174269 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-2ltmb" Oct 11 04:11:46 crc kubenswrapper[4798]: I1011 04:11:46.174278 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1be0b9d4e17a4f1e52318173514b130751f2277bdd234fadb2d319de3dcadeee" Oct 11 04:11:46 crc kubenswrapper[4798]: E1011 04:11:46.176851 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-btwq2" podUID="c7e46251-4b37-40c6-adbc-877857e4442b" Oct 11 04:11:46 crc kubenswrapper[4798]: I1011 04:11:46.988551 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-55d589c759-9xthl"] Oct 11 04:11:46 crc kubenswrapper[4798]: E1011 04:11:46.988938 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79172d81-dfa6-4863-afbe-1e8c8b622f6d" containerName="barbican-db-sync" Oct 11 04:11:46 crc kubenswrapper[4798]: I1011 04:11:46.988949 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="79172d81-dfa6-4863-afbe-1e8c8b622f6d" containerName="barbican-db-sync" Oct 11 04:11:46 crc kubenswrapper[4798]: I1011 04:11:46.989121 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="79172d81-dfa6-4863-afbe-1e8c8b622f6d" containerName="barbican-db-sync" Oct 11 04:11:46 crc kubenswrapper[4798]: I1011 04:11:46.990031 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:46 crc kubenswrapper[4798]: I1011 04:11:46.997664 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-cf94r" Oct 11 04:11:46 crc kubenswrapper[4798]: I1011 04:11:46.997873 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.002220 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.008028 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6574767f6b-8p24k"] Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.009446 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.013914 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.017530 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-55d589c759-9xthl"] Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.034156 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvttl\" (UniqueName: \"kubernetes.io/projected/708609f6-405f-4f80-a2ed-e749a3803884-kube-api-access-vvttl\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.034246 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzjbj\" (UniqueName: \"kubernetes.io/projected/141c65d6-e8c4-4ae3-be7b-7adf75193efc-kube-api-access-rzjbj\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.034321 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/141c65d6-e8c4-4ae3-be7b-7adf75193efc-logs\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.034438 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/141c65d6-e8c4-4ae3-be7b-7adf75193efc-config-data\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.034469 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/708609f6-405f-4f80-a2ed-e749a3803884-combined-ca-bundle\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.034594 4798 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/141c65d6-e8c4-4ae3-be7b-7adf75193efc-combined-ca-bundle\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.034680 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/141c65d6-e8c4-4ae3-be7b-7adf75193efc-config-data-custom\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.034710 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/708609f6-405f-4f80-a2ed-e749a3803884-config-data\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.034754 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/708609f6-405f-4f80-a2ed-e749a3803884-config-data-custom\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.034773 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/708609f6-405f-4f80-a2ed-e749a3803884-logs\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.050097 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6574767f6b-8p24k"] Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.116362 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f46f79845-lc5wn"] Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.117917 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.128984 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f46f79845-lc5wn"] Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.138137 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/141c65d6-e8c4-4ae3-be7b-7adf75193efc-config-data-custom\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.138205 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/708609f6-405f-4f80-a2ed-e749a3803884-config-data\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.138236 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/708609f6-405f-4f80-a2ed-e749a3803884-config-data-custom\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.138262 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/708609f6-405f-4f80-a2ed-e749a3803884-logs\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.140161 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvttl\" (UniqueName: \"kubernetes.io/projected/708609f6-405f-4f80-a2ed-e749a3803884-kube-api-access-vvttl\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.140207 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzjbj\" (UniqueName: \"kubernetes.io/projected/141c65d6-e8c4-4ae3-be7b-7adf75193efc-kube-api-access-rzjbj\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.140236 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/141c65d6-e8c4-4ae3-be7b-7adf75193efc-logs\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.140290 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/141c65d6-e8c4-4ae3-be7b-7adf75193efc-config-data\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.140340 4798 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/708609f6-405f-4f80-a2ed-e749a3803884-combined-ca-bundle\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.140478 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/141c65d6-e8c4-4ae3-be7b-7adf75193efc-combined-ca-bundle\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.146643 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/141c65d6-e8c4-4ae3-be7b-7adf75193efc-combined-ca-bundle\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.150927 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/708609f6-405f-4f80-a2ed-e749a3803884-config-data-custom\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.151361 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/708609f6-405f-4f80-a2ed-e749a3803884-logs\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.151702 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/141c65d6-e8c4-4ae3-be7b-7adf75193efc-logs\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.160606 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/141c65d6-e8c4-4ae3-be7b-7adf75193efc-config-data-custom\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.161293 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/708609f6-405f-4f80-a2ed-e749a3803884-config-data\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.162026 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/708609f6-405f-4f80-a2ed-e749a3803884-combined-ca-bundle\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc 
kubenswrapper[4798]: I1011 04:11:47.165691 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/141c65d6-e8c4-4ae3-be7b-7adf75193efc-config-data\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.176943 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzjbj\" (UniqueName: \"kubernetes.io/projected/141c65d6-e8c4-4ae3-be7b-7adf75193efc-kube-api-access-rzjbj\") pod \"barbican-keystone-listener-6574767f6b-8p24k\" (UID: \"141c65d6-e8c4-4ae3-be7b-7adf75193efc\") " pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.183124 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvttl\" (UniqueName: \"kubernetes.io/projected/708609f6-405f-4f80-a2ed-e749a3803884-kube-api-access-vvttl\") pod \"barbican-worker-55d589c759-9xthl\" (UID: \"708609f6-405f-4f80-a2ed-e749a3803884\") " pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.230584 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-68596b54cd-66kzb"] Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.234605 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.238345 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.244152 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-ovsdbserver-nb\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.244274 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-dns-svc\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.244318 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcrth\" (UniqueName: \"kubernetes.io/projected/1a9dc4f3-49eb-45e4-973c-8556635d4b23-kube-api-access-kcrth\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.244411 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-ovsdbserver-sb\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.244550 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-config\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.262666 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-68596b54cd-66kzb"] Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.328440 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-55d589c759-9xthl" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.333584 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.345754 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-dns-svc\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.345828 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcrth\" (UniqueName: \"kubernetes.io/projected/1a9dc4f3-49eb-45e4-973c-8556635d4b23-kube-api-access-kcrth\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.345898 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-config-data\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.345956 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-ovsdbserver-sb\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.345994 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-config-data-custom\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.346100 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-combined-ca-bundle\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.346142 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5162cce-7f28-490f-8a4d-f1d10824cafa-logs\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 
04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.346205 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-config\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.346242 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-ovsdbserver-nb\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.346267 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp5bm\" (UniqueName: \"kubernetes.io/projected/a5162cce-7f28-490f-8a4d-f1d10824cafa-kube-api-access-rp5bm\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.347532 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-ovsdbserver-sb\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.349125 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-ovsdbserver-nb\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.348383 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-config\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.352535 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-dns-svc\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.363798 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcrth\" (UniqueName: \"kubernetes.io/projected/1a9dc4f3-49eb-45e4-973c-8556635d4b23-kube-api-access-kcrth\") pod \"dnsmasq-dns-7f46f79845-lc5wn\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.447838 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-config-data\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.447896 4798 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-config-data-custom\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.447950 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-combined-ca-bundle\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.447978 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5162cce-7f28-490f-8a4d-f1d10824cafa-logs\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.448018 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp5bm\" (UniqueName: \"kubernetes.io/projected/a5162cce-7f28-490f-8a4d-f1d10824cafa-kube-api-access-rp5bm\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.448766 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5162cce-7f28-490f-8a4d-f1d10824cafa-logs\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.451468 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.457862 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-config-data\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.460759 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-config-data-custom\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.461536 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-combined-ca-bundle\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.464233 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp5bm\" (UniqueName: \"kubernetes.io/projected/a5162cce-7f28-490f-8a4d-f1d10824cafa-kube-api-access-rp5bm\") pod \"barbican-api-68596b54cd-66kzb\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: I1011 04:11:47.570158 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:11:47 crc kubenswrapper[4798]: E1011 04:11:47.572928 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/sg-core:latest" Oct 11 04:11:47 crc kubenswrapper[4798]: E1011 04:11:47.573083 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:sg-core,Image:quay.io/openstack-k8s-operators/sg-core:latest,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:sg-core-conf-yaml,ReadOnly:false,MountPath:/etc/sg-core.conf.yaml,SubPath:sg-core.conf.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6ptmg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(0020f7ba-0c70-4bcf-90a8-68729a98025c): ErrImagePull: rpc error: code = Canceled desc = copying 
config: context canceled" logger="UnhandledError" Oct 11 04:11:48 crc kubenswrapper[4798]: I1011 04:11:48.197648 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bf585fdff-grnph"] Oct 11 04:11:48 crc kubenswrapper[4798]: I1011 04:11:48.227106 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bf585fdff-grnph" event={"ID":"0d75e3b2-e53b-4823-9272-b7038c8b379c","Type":"ContainerStarted","Data":"a3f8f96060ae53fc69b1621bf3553e2af17804472aa86a064f3fad65fb8b7751"} Oct 11 04:11:48 crc kubenswrapper[4798]: I1011 04:11:48.228797 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-f5wn5" event={"ID":"1afeba46-c2cc-455e-ab80-56f553c9e4cb","Type":"ContainerStarted","Data":"7d8369d5b8f2bd49e7d5814f6b9e1a896c5c2e00b1bb385b16320a0452556933"} Oct 11 04:11:48 crc kubenswrapper[4798]: I1011 04:11:48.236838 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-55d589c759-9xthl"] Oct 11 04:11:48 crc kubenswrapper[4798]: I1011 04:11:48.239517 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-f5wn5"] Oct 11 04:11:48 crc kubenswrapper[4798]: I1011 04:11:48.243342 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-55d589c759-9xthl" event={"ID":"708609f6-405f-4f80-a2ed-e749a3803884","Type":"ContainerStarted","Data":"74778aaf34f0acbd59033505982faa074d966cfacf0e4d313471b7f04798903b"} Oct 11 04:11:48 crc kubenswrapper[4798]: I1011 04:11:48.245737 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c67f5f784-ph78z" event={"ID":"0843bdb2-fc67-4c61-991e-383ebdb67136","Type":"ContainerStarted","Data":"03d5e6929da16cf956a50ede81bf1b824dd3b7bc3e2a462378bfbecc617aa3f1"} Oct 11 04:11:48 crc kubenswrapper[4798]: I1011 04:11:48.417166 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6574767f6b-8p24k"] Oct 11 04:11:48 crc kubenswrapper[4798]: I1011 04:11:48.428185 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f46f79845-lc5wn"] Oct 11 04:11:48 crc kubenswrapper[4798]: W1011 04:11:48.439960 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod141c65d6_e8c4_4ae3_be7b_7adf75193efc.slice/crio-7a5dd93cc06d79bcf32c26f392287fb90adc3df1d49c9d6c2c0572945510a85d WatchSource:0}: Error finding container 7a5dd93cc06d79bcf32c26f392287fb90adc3df1d49c9d6c2c0572945510a85d: Status 404 returned error can't find the container with id 7a5dd93cc06d79bcf32c26f392287fb90adc3df1d49c9d6c2c0572945510a85d Oct 11 04:11:48 crc kubenswrapper[4798]: I1011 04:11:48.450341 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-68596b54cd-66kzb"] Oct 11 04:11:48 crc kubenswrapper[4798]: W1011 04:11:48.452962 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a9dc4f3_49eb_45e4_973c_8556635d4b23.slice/crio-cb6c28060d166bdf568e4d2b57b3902572460eecbaf5ea3942e220650fb801ca WatchSource:0}: Error finding container cb6c28060d166bdf568e4d2b57b3902572460eecbaf5ea3942e220650fb801ca: Status 404 returned error can't find the container with id cb6c28060d166bdf568e4d2b57b3902572460eecbaf5ea3942e220650fb801ca Oct 11 04:11:48 crc kubenswrapper[4798]: W1011 04:11:48.462146 4798 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5162cce_7f28_490f_8a4d_f1d10824cafa.slice/crio-6a8eb0d0d05fe3eb8c85c0c11f11d38285e787dc14bb01e7868229819e59ae2e WatchSource:0}: Error finding container 6a8eb0d0d05fe3eb8c85c0c11f11d38285e787dc14bb01e7868229819e59ae2e: Status 404 returned error can't find the container with id 6a8eb0d0d05fe3eb8c85c0c11f11d38285e787dc14bb01e7868229819e59ae2e
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.285454 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c67f5f784-ph78z" event={"ID":"0843bdb2-fc67-4c61-991e-383ebdb67136","Type":"ContainerStarted","Data":"a4f61930cafd1108b5586efcbee46c4a918c99a823b4503ba669ba17bf48448e"}
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.287983 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.288016 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.289199 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" event={"ID":"141c65d6-e8c4-4ae3-be7b-7adf75193efc","Type":"ContainerStarted","Data":"7a5dd93cc06d79bcf32c26f392287fb90adc3df1d49c9d6c2c0572945510a85d"}
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.299514 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68596b54cd-66kzb" event={"ID":"a5162cce-7f28-490f-8a4d-f1d10824cafa","Type":"ContainerStarted","Data":"a669bc49245b6310c09180cf470126e8dade49e6594de28edd1f013521855cf3"}
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.299578 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68596b54cd-66kzb" event={"ID":"a5162cce-7f28-490f-8a4d-f1d10824cafa","Type":"ContainerStarted","Data":"5d5f151c738d87a7b83579cfc83f1e6f46d64e19e01dae21a068939d62323028"}
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.299591 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68596b54cd-66kzb" event={"ID":"a5162cce-7f28-490f-8a4d-f1d10824cafa","Type":"ContainerStarted","Data":"6a8eb0d0d05fe3eb8c85c0c11f11d38285e787dc14bb01e7868229819e59ae2e"}
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.302755 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-68596b54cd-66kzb"
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.302797 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-68596b54cd-66kzb"
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.314120 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-c67f5f784-ph78z" podStartSLOduration=16.314103924 podStartE2EDuration="16.314103924s" podCreationTimestamp="2025-10-11 04:11:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:11:49.30851424 +0000 UTC m=+1004.644803926" watchObservedRunningTime="2025-10-11 04:11:49.314103924 +0000 UTC m=+1004.650393610"
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.318472 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bf585fdff-grnph" event={"ID":"0d75e3b2-e53b-4823-9272-b7038c8b379c","Type":"ContainerStarted","Data":"d1f99824df46b90b2ca7206605829989272e2491a8055638966f042f29e701d0"}
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.319413 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-bf585fdff-grnph"
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.322735 4798 generic.go:334] "Generic (PLEG): container finished" podID="1a9dc4f3-49eb-45e4-973c-8556635d4b23" containerID="eec7d039970114cae6055b3b75c68580d67680a90c0ec46a80b54f77bdfdd9ef" exitCode=0
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.322778 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" event={"ID":"1a9dc4f3-49eb-45e4-973c-8556635d4b23","Type":"ContainerDied","Data":"eec7d039970114cae6055b3b75c68580d67680a90c0ec46a80b54f77bdfdd9ef"}
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.322793 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" event={"ID":"1a9dc4f3-49eb-45e4-973c-8556635d4b23","Type":"ContainerStarted","Data":"cb6c28060d166bdf568e4d2b57b3902572460eecbaf5ea3942e220650fb801ca"}
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.333801 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-68596b54cd-66kzb" podStartSLOduration=2.333586199 podStartE2EDuration="2.333586199s" podCreationTimestamp="2025-10-11 04:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:11:49.325150838 +0000 UTC m=+1004.661440524" watchObservedRunningTime="2025-10-11 04:11:49.333586199 +0000 UTC m=+1004.669875885"
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.360219 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-f5wn5" event={"ID":"1afeba46-c2cc-455e-ab80-56f553c9e4cb","Type":"ContainerStarted","Data":"26420071971f8dbe0fb85a3a7c12df46a1f5fb44fb1420e0ed0622a421d484da"}
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.371440 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bf585fdff-grnph" podStartSLOduration=5.370081771 podStartE2EDuration="5.370081771s" podCreationTimestamp="2025-10-11 04:11:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:11:49.365995953 +0000 UTC m=+1004.702285639" watchObservedRunningTime="2025-10-11 04:11:49.370081771 +0000 UTC m=+1004.706371457"
Oct 11 04:11:49 crc kubenswrapper[4798]: I1011 04:11:49.393926 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-f5wn5" podStartSLOduration=12.39390295 podStartE2EDuration="12.39390295s" podCreationTimestamp="2025-10-11 04:11:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:11:49.384095425 +0000 UTC m=+1004.720385111" watchObservedRunningTime="2025-10-11 04:11:49.39390295 +0000 UTC m=+1004.730192636"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.009307 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7466b4ffd-mfb97"]
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.011373 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.014883 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.015091 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.036851 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7466b4ffd-mfb97"]
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.104845 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-combined-ca-bundle\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.104981 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f108baa-f968-4e0a-8227-8cc389329a06-logs\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.105033 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-config-data\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.105061 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-config-data-custom\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.105310 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-internal-tls-certs\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.105878 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pznq\" (UniqueName: \"kubernetes.io/projected/6f108baa-f968-4e0a-8227-8cc389329a06-kube-api-access-6pznq\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.106060 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-public-tls-certs\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.208082 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-internal-tls-certs\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.208142 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pznq\" (UniqueName: \"kubernetes.io/projected/6f108baa-f968-4e0a-8227-8cc389329a06-kube-api-access-6pznq\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.208211 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-public-tls-certs\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.208282 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-combined-ca-bundle\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.208346 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f108baa-f968-4e0a-8227-8cc389329a06-logs\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.208403 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-config-data\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.208430 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-config-data-custom\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.208994 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f108baa-f968-4e0a-8227-8cc389329a06-logs\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.214800 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-config-data-custom\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.214800 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-public-tls-certs\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.215276 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-config-data\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.216013 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-internal-tls-certs\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.220068 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f108baa-f968-4e0a-8227-8cc389329a06-combined-ca-bundle\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.224302 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pznq\" (UniqueName: \"kubernetes.io/projected/6f108baa-f968-4e0a-8227-8cc389329a06-kube-api-access-6pznq\") pod \"barbican-api-7466b4ffd-mfb97\" (UID: \"6f108baa-f968-4e0a-8227-8cc389329a06\") " pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.333773 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:11:50 crc kubenswrapper[4798]: I1011 04:11:50.859210 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7466b4ffd-mfb97"]
Oct 11 04:11:50 crc kubenswrapper[4798]: W1011 04:11:50.879202 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f108baa_f968_4e0a_8227_8cc389329a06.slice/crio-998f525293a2cfa9be0a1257e38328475857e73d569887d7b08acb3fb21160f7 WatchSource:0}: Error finding container 998f525293a2cfa9be0a1257e38328475857e73d569887d7b08acb3fb21160f7: Status 404 returned error can't find the container with id 998f525293a2cfa9be0a1257e38328475857e73d569887d7b08acb3fb21160f7
Oct 11 04:11:51 crc kubenswrapper[4798]: I1011 04:11:51.387978 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7466b4ffd-mfb97" event={"ID":"6f108baa-f968-4e0a-8227-8cc389329a06","Type":"ContainerStarted","Data":"998f525293a2cfa9be0a1257e38328475857e73d569887d7b08acb3fb21160f7"}
Oct 11 04:11:55 crc kubenswrapper[4798]: I1011 04:11:55.457904 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" event={"ID":"1a9dc4f3-49eb-45e4-973c-8556635d4b23","Type":"ContainerStarted","Data":"e9911bfeaebd051b283f37780e60b4f31c60e7ae997869f1b9d02825da1a40a9"}
Oct 11 04:11:56 crc kubenswrapper[4798]: I1011 04:11:56.467271 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7466b4ffd-mfb97" event={"ID":"6f108baa-f968-4e0a-8227-8cc389329a06","Type":"ContainerStarted","Data":"fd4c300985f73d764b9ace6909eac5f25b64b84cdfe353e45417cabb7296534f"}
Oct 11 04:11:56 crc kubenswrapper[4798]: I1011 04:11:56.577716 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-68596b54cd-66kzb" podUID="a5162cce-7f28-490f-8a4d-f1d10824cafa" containerName="barbican-api-log" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 04:11:57 crc kubenswrapper[4798]: I1011 04:11:57.477817 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7f46f79845-lc5wn"
Oct 11 04:11:58 crc kubenswrapper[4798]: I1011 04:11:58.444908 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" podStartSLOduration=11.444889888 podStartE2EDuration="11.444889888s" podCreationTimestamp="2025-10-11 04:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:11:57.508130667 +0000 UTC m=+1012.844420353" watchObservedRunningTime="2025-10-11 04:11:58.444889888 +0000 UTC m=+1013.781179574"
Oct 11 04:11:59 crc kubenswrapper[4798]: I1011 04:11:59.099438 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-68596b54cd-66kzb"
Oct 11 04:11:59 crc kubenswrapper[4798]: I1011 04:11:59.103253 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-68596b54cd-66kzb"
Oct 11 04:12:01 crc kubenswrapper[4798]: E1011 04:12:01.256324 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified"
Oct 11 04:12:01 crc kubenswrapper[4798]: E1011 04:12:01.257225 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-keystone-listener-log,Image:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,Command:[/usr/bin/dumb-init],Args:[--single-child -- /usr/bin/tail -n+1 -F /var/log/barbican/barbican-keystone-listener.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5b4h5f9h646h656h5cbh595h598h5cbh68bh5bdh5dhb5h656h57ch648hd5hc8hdhd7h5d7h64fh67fhb9h545h54h679hf6h5bdh5ddh5b7h68h64cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/barbican,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rzjbj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-keystone-listener-6574767f6b-8p24k_openstack(141c65d6-e8c4-4ae3-be7b-7adf75193efc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 11 04:12:01 crc kubenswrapper[4798]: E1011 04:12:01.259350 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"barbican-keystone-listener-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"barbican-keystone-listener\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified\\\"\"]" pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" podUID="141c65d6-e8c4-4ae3-be7b-7adf75193efc"
Oct 11 04:12:01 crc kubenswrapper[4798]: E1011 04:12:01.529864 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"barbican-keystone-listener-log\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified\\\"\", failed to \"StartContainer\" for \"barbican-keystone-listener\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified\\\"\"]" pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" podUID="141c65d6-e8c4-4ae3-be7b-7adf75193efc"
Oct 11 04:12:02 crc kubenswrapper[4798]: E1011 04:12:02.138237 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"sg-core\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c"
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.453570 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7f46f79845-lc5wn"
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.535278 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7466b4ffd-mfb97" event={"ID":"6f108baa-f968-4e0a-8227-8cc389329a06","Type":"ContainerStarted","Data":"efd5721aa09cec9372a08b7a71e53866aaf09720a7efb968d8e6ac0ece5908e3"}
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.537107 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"]
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.543165 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-55d589c759-9xthl" event={"ID":"708609f6-405f-4f80-a2ed-e749a3803884","Type":"ContainerStarted","Data":"dc1ca61964152d8ec987d94c525d23729fa700a42a148007159f5b8b356f5a9f"}
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.543230 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-55d589c759-9xthl" event={"ID":"708609f6-405f-4f80-a2ed-e749a3803884","Type":"ContainerStarted","Data":"1de7bac283baeb49280c641c35eb58d764bf1daa44a4f6d4cb0a8c00dcb1d0fd"}
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.544096 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm" podUID="d40dd511-9540-43b0-b019-9d58ecb9a4fe" containerName="dnsmasq-dns" containerID="cri-o://b1b32daac8bca48b9ec69c30e43e9bd7135e20631e212757ee0d99e68e9b0639" gracePeriod=10
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.544900 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.544924 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.552680 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0020f7ba-0c70-4bcf-90a8-68729a98025c","Type":"ContainerStarted","Data":"abc3e13a6d773001b3bd58449bbbcf5570eaf4a6c943b999d4fcc9c9b57c066f"}
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.552872 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerName="ceilometer-central-agent" containerID="cri-o://71a71593f08fe793927ee7efa2ee7dde883b2df0ced200ba8c2bb87304a7e14b" gracePeriod=30
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.552986 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.553040 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerName="proxy-httpd" containerID="cri-o://abc3e13a6d773001b3bd58449bbbcf5570eaf4a6c943b999d4fcc9c9b57c066f" gracePeriod=30
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.553292 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerName="ceilometer-notification-agent" containerID="cri-o://81b3e9df75092a3efda3ab28bcc5bdcc08af56888948c5568fc5e4c04810cc9f" gracePeriod=30
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.588330 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7466b4ffd-mfb97" podStartSLOduration=13.588311678 podStartE2EDuration="13.588311678s" podCreationTimestamp="2025-10-11 04:11:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:12:02.587836327 +0000 UTC m=+1017.924126033" watchObservedRunningTime="2025-10-11 04:12:02.588311678 +0000 UTC m=+1017.924601364"
Oct 11 04:12:02 crc kubenswrapper[4798]: I1011 04:12:02.682124 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-55d589c759-9xthl" podStartSLOduration=3.173744612 podStartE2EDuration="16.682094378s" podCreationTimestamp="2025-10-11 04:11:46 +0000 UTC" firstStartedPulling="2025-10-11 04:11:48.190086071 +0000 UTC m=+1003.526375757" lastFinishedPulling="2025-10-11 04:12:01.698435837 +0000 UTC m=+1017.034725523" observedRunningTime="2025-10-11 04:12:02.666206629 +0000 UTC m=+1018.002496325" watchObservedRunningTime="2025-10-11 04:12:02.682094378 +0000 UTC m=+1018.018384064"
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.490610 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.555801 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-ovsdbserver-sb\") pod \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") "
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.555848 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rp9p8\" (UniqueName: \"kubernetes.io/projected/d40dd511-9540-43b0-b019-9d58ecb9a4fe-kube-api-access-rp9p8\") pod \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") "
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.555894 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-config\") pod \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") "
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.556009 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-dns-svc\") pod \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") "
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.556123 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-ovsdbserver-nb\") pod \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\" (UID: \"d40dd511-9540-43b0-b019-9d58ecb9a4fe\") "
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.562014 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d40dd511-9540-43b0-b019-9d58ecb9a4fe-kube-api-access-rp9p8" (OuterVolumeSpecName: "kube-api-access-rp9p8") pod "d40dd511-9540-43b0-b019-9d58ecb9a4fe" (UID: "d40dd511-9540-43b0-b019-9d58ecb9a4fe"). InnerVolumeSpecName "kube-api-access-rp9p8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.568730 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-btwq2" event={"ID":"c7e46251-4b37-40c6-adbc-877857e4442b","Type":"ContainerStarted","Data":"5b05062cc436b6c50869a63e442b96759ab9057beac4469a2039e02cb3aeddd4"}
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.571879 4798 generic.go:334] "Generic (PLEG): container finished" podID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerID="abc3e13a6d773001b3bd58449bbbcf5570eaf4a6c943b999d4fcc9c9b57c066f" exitCode=0
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.571933 4798 generic.go:334] "Generic (PLEG): container finished" podID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerID="71a71593f08fe793927ee7efa2ee7dde883b2df0ced200ba8c2bb87304a7e14b" exitCode=0
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.571969 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0020f7ba-0c70-4bcf-90a8-68729a98025c","Type":"ContainerDied","Data":"abc3e13a6d773001b3bd58449bbbcf5570eaf4a6c943b999d4fcc9c9b57c066f"}
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.572291 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0020f7ba-0c70-4bcf-90a8-68729a98025c","Type":"ContainerDied","Data":"71a71593f08fe793927ee7efa2ee7dde883b2df0ced200ba8c2bb87304a7e14b"}
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.574057 4798 generic.go:334] "Generic (PLEG): container finished" podID="d40dd511-9540-43b0-b019-9d58ecb9a4fe" containerID="b1b32daac8bca48b9ec69c30e43e9bd7135e20631e212757ee0d99e68e9b0639" exitCode=0
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.574242 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm" event={"ID":"d40dd511-9540-43b0-b019-9d58ecb9a4fe","Type":"ContainerDied","Data":"b1b32daac8bca48b9ec69c30e43e9bd7135e20631e212757ee0d99e68e9b0639"}
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.574875 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm" event={"ID":"d40dd511-9540-43b0-b019-9d58ecb9a4fe","Type":"ContainerDied","Data":"ffe0614ecc04f9b1d5edacd70c87c02091096cd912e047f25fb30ef0e6a2dd97"}
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.574893 4798 scope.go:117] "RemoveContainer" containerID="b1b32daac8bca48b9ec69c30e43e9bd7135e20631e212757ee0d99e68e9b0639"
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.574294 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.593699 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-btwq2" podStartSLOduration=5.053580802 podStartE2EDuration="41.593682318s" podCreationTimestamp="2025-10-11 04:11:22 +0000 UTC" firstStartedPulling="2025-10-11 04:11:25.289861523 +0000 UTC m=+980.626151209" lastFinishedPulling="2025-10-11 04:12:01.829963039 +0000 UTC m=+1017.166252725" observedRunningTime="2025-10-11 04:12:03.591946006 +0000 UTC m=+1018.928235702" watchObservedRunningTime="2025-10-11 04:12:03.593682318 +0000 UTC m=+1018.929972004"
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.607121 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d40dd511-9540-43b0-b019-9d58ecb9a4fe" (UID: "d40dd511-9540-43b0-b019-9d58ecb9a4fe"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.623576 4798 scope.go:117] "RemoveContainer" containerID="c8d059d98465fae0ff674811da11fb9a7c1d6c703188ffda63d6088ba539b311"
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.632322 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d40dd511-9540-43b0-b019-9d58ecb9a4fe" (UID: "d40dd511-9540-43b0-b019-9d58ecb9a4fe"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.635869 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-config" (OuterVolumeSpecName: "config") pod "d40dd511-9540-43b0-b019-9d58ecb9a4fe" (UID: "d40dd511-9540-43b0-b019-9d58ecb9a4fe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.649717 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d40dd511-9540-43b0-b019-9d58ecb9a4fe" (UID: "d40dd511-9540-43b0-b019-9d58ecb9a4fe"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.654084 4798 scope.go:117] "RemoveContainer" containerID="b1b32daac8bca48b9ec69c30e43e9bd7135e20631e212757ee0d99e68e9b0639"
Oct 11 04:12:03 crc kubenswrapper[4798]: E1011 04:12:03.654681 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1b32daac8bca48b9ec69c30e43e9bd7135e20631e212757ee0d99e68e9b0639\": container with ID starting with b1b32daac8bca48b9ec69c30e43e9bd7135e20631e212757ee0d99e68e9b0639 not found: ID does not exist" containerID="b1b32daac8bca48b9ec69c30e43e9bd7135e20631e212757ee0d99e68e9b0639"
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.654740 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1b32daac8bca48b9ec69c30e43e9bd7135e20631e212757ee0d99e68e9b0639"} err="failed to get container status \"b1b32daac8bca48b9ec69c30e43e9bd7135e20631e212757ee0d99e68e9b0639\": rpc error: code = NotFound desc = could not find container \"b1b32daac8bca48b9ec69c30e43e9bd7135e20631e212757ee0d99e68e9b0639\": container with ID starting with b1b32daac8bca48b9ec69c30e43e9bd7135e20631e212757ee0d99e68e9b0639 not found: ID does not exist"
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.654775 4798 scope.go:117] "RemoveContainer" containerID="c8d059d98465fae0ff674811da11fb9a7c1d6c703188ffda63d6088ba539b311"
Oct 11 04:12:03 crc kubenswrapper[4798]: E1011 04:12:03.655092 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8d059d98465fae0ff674811da11fb9a7c1d6c703188ffda63d6088ba539b311\": container with ID starting with c8d059d98465fae0ff674811da11fb9a7c1d6c703188ffda63d6088ba539b311 not found: ID does not exist" containerID="c8d059d98465fae0ff674811da11fb9a7c1d6c703188ffda63d6088ba539b311"
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.655117 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8d059d98465fae0ff674811da11fb9a7c1d6c703188ffda63d6088ba539b311"} err="failed to get container status \"c8d059d98465fae0ff674811da11fb9a7c1d6c703188ffda63d6088ba539b311\": rpc error: code = NotFound desc = could not find container \"c8d059d98465fae0ff674811da11fb9a7c1d6c703188ffda63d6088ba539b311\": container with ID starting with c8d059d98465fae0ff674811da11fb9a7c1d6c703188ffda63d6088ba539b311 not found: ID does not exist"
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.658977 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-dns-svc\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.659174 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.659195 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.659212 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rp9p8\" (UniqueName: \"kubernetes.io/projected/d40dd511-9540-43b0-b019-9d58ecb9a4fe-kube-api-access-rp9p8\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.659225 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d40dd511-9540-43b0-b019-9d58ecb9a4fe-config\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.904254 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"]
Oct 11 04:12:03 crc kubenswrapper[4798]: I1011 04:12:03.910853 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-w26gm"]
Oct 11 04:12:04 crc kubenswrapper[4798]: I1011 04:12:04.217783 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7466b4ffd-mfb97"
Oct 11 04:12:04 crc kubenswrapper[4798]: I1011 04:12:04.972029 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:12:04 crc kubenswrapper[4798]: I1011 04:12:04.973574 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-c67f5f784-ph78z"
Oct 11 04:12:05 crc kubenswrapper[4798]: I1011 04:12:05.435209 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d40dd511-9540-43b0-b019-9d58ecb9a4fe" path="/var/lib/kubelet/pods/d40dd511-9540-43b0-b019-9d58ecb9a4fe/volumes"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.070180 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.200570 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ptmg\" (UniqueName: \"kubernetes.io/projected/0020f7ba-0c70-4bcf-90a8-68729a98025c-kube-api-access-6ptmg\") pod \"0020f7ba-0c70-4bcf-90a8-68729a98025c\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") "
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.200680 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0020f7ba-0c70-4bcf-90a8-68729a98025c-log-httpd\") pod \"0020f7ba-0c70-4bcf-90a8-68729a98025c\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") "
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.200771 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0020f7ba-0c70-4bcf-90a8-68729a98025c-run-httpd\") pod \"0020f7ba-0c70-4bcf-90a8-68729a98025c\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") "
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.200798 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-combined-ca-bundle\") pod \"0020f7ba-0c70-4bcf-90a8-68729a98025c\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") "
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.200829 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-scripts\") pod \"0020f7ba-0c70-4bcf-90a8-68729a98025c\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") "
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.200874 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-sg-core-conf-yaml\") pod \"0020f7ba-0c70-4bcf-90a8-68729a98025c\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") "
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.200917 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-config-data\") pod \"0020f7ba-0c70-4bcf-90a8-68729a98025c\" (UID: \"0020f7ba-0c70-4bcf-90a8-68729a98025c\") "
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.201376 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0020f7ba-0c70-4bcf-90a8-68729a98025c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0020f7ba-0c70-4bcf-90a8-68729a98025c" (UID: "0020f7ba-0c70-4bcf-90a8-68729a98025c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.201446 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0020f7ba-0c70-4bcf-90a8-68729a98025c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0020f7ba-0c70-4bcf-90a8-68729a98025c" (UID: "0020f7ba-0c70-4bcf-90a8-68729a98025c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.208910 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-scripts" (OuterVolumeSpecName: "scripts") pod "0020f7ba-0c70-4bcf-90a8-68729a98025c" (UID: "0020f7ba-0c70-4bcf-90a8-68729a98025c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.214663 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0020f7ba-0c70-4bcf-90a8-68729a98025c" (UID: "0020f7ba-0c70-4bcf-90a8-68729a98025c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.215013 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0020f7ba-0c70-4bcf-90a8-68729a98025c-kube-api-access-6ptmg" (OuterVolumeSpecName: "kube-api-access-6ptmg") pod "0020f7ba-0c70-4bcf-90a8-68729a98025c" (UID: "0020f7ba-0c70-4bcf-90a8-68729a98025c"). InnerVolumeSpecName "kube-api-access-6ptmg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.281613 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0020f7ba-0c70-4bcf-90a8-68729a98025c" (UID: "0020f7ba-0c70-4bcf-90a8-68729a98025c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.296008 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-config-data" (OuterVolumeSpecName: "config-data") pod "0020f7ba-0c70-4bcf-90a8-68729a98025c" (UID: "0020f7ba-0c70-4bcf-90a8-68729a98025c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.303379 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.303425 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ptmg\" (UniqueName: \"kubernetes.io/projected/0020f7ba-0c70-4bcf-90a8-68729a98025c-kube-api-access-6ptmg\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.303436 4798 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0020f7ba-0c70-4bcf-90a8-68729a98025c-log-httpd\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.303444 4798 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0020f7ba-0c70-4bcf-90a8-68729a98025c-run-httpd\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.303455 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.303463 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.303471 4798 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0020f7ba-0c70-4bcf-90a8-68729a98025c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.610089 4798 generic.go:334] "Generic (PLEG): container finished" podID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerID="81b3e9df75092a3efda3ab28bcc5bdcc08af56888948c5568fc5e4c04810cc9f" exitCode=0
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.610208 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.610188 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0020f7ba-0c70-4bcf-90a8-68729a98025c","Type":"ContainerDied","Data":"81b3e9df75092a3efda3ab28bcc5bdcc08af56888948c5568fc5e4c04810cc9f"}
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.610661 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0020f7ba-0c70-4bcf-90a8-68729a98025c","Type":"ContainerDied","Data":"f5adbe3bfcc9a4a4158383b37266b4322e7399f938013e2801ca7ac3049fb7fc"}
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.610724 4798 scope.go:117] "RemoveContainer" containerID="abc3e13a6d773001b3bd58449bbbcf5570eaf4a6c943b999d4fcc9c9b57c066f"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.655049 4798 scope.go:117] "RemoveContainer" containerID="81b3e9df75092a3efda3ab28bcc5bdcc08af56888948c5568fc5e4c04810cc9f"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.696247 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.718795 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.731946 4798 scope.go:117] "RemoveContainer" containerID="71a71593f08fe793927ee7efa2ee7dde883b2df0ced200ba8c2bb87304a7e14b"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.741497 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:12:06 crc kubenswrapper[4798]: E1011 04:12:06.742214 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerName="proxy-httpd"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.742230 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerName="proxy-httpd"
Oct 11 04:12:06 crc kubenswrapper[4798]: E1011 04:12:06.742244 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerName="ceilometer-central-agent"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.742250 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerName="ceilometer-central-agent"
Oct 11 04:12:06 crc kubenswrapper[4798]: E1011 04:12:06.742263 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d40dd511-9540-43b0-b019-9d58ecb9a4fe" containerName="init"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.742269 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="d40dd511-9540-43b0-b019-9d58ecb9a4fe" containerName="init"
Oct 11 04:12:06 crc kubenswrapper[4798]: E1011 04:12:06.742279 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d40dd511-9540-43b0-b019-9d58ecb9a4fe" containerName="dnsmasq-dns"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.742285 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="d40dd511-9540-43b0-b019-9d58ecb9a4fe" containerName="dnsmasq-dns"
Oct 11 04:12:06 crc kubenswrapper[4798]: E1011 04:12:06.742314 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerName="ceilometer-notification-agent"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.742321 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerName="ceilometer-notification-agent"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.742506 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerName="ceilometer-central-agent"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.742519 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="d40dd511-9540-43b0-b019-9d58ecb9a4fe" containerName="dnsmasq-dns"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.742542 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerName="ceilometer-notification-agent"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.742554 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c" containerName="proxy-httpd"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.744157 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.747272 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.747762 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.751197 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.815293 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlwh2\" (UniqueName: \"kubernetes.io/projected/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-kube-api-access-mlwh2\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.815352 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-scripts\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.815405 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-run-httpd\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.815445 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-config-data\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.815468 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.815581 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.815620 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-log-httpd\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.821871 4798 scope.go:117] "RemoveContainer" containerID="abc3e13a6d773001b3bd58449bbbcf5570eaf4a6c943b999d4fcc9c9b57c066f"
Oct 11 04:12:06 crc kubenswrapper[4798]: E1011 04:12:06.822551 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abc3e13a6d773001b3bd58449bbbcf5570eaf4a6c943b999d4fcc9c9b57c066f\": container with ID starting with abc3e13a6d773001b3bd58449bbbcf5570eaf4a6c943b999d4fcc9c9b57c066f not found: ID does not exist" containerID="abc3e13a6d773001b3bd58449bbbcf5570eaf4a6c943b999d4fcc9c9b57c066f"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.822580 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abc3e13a6d773001b3bd58449bbbcf5570eaf4a6c943b999d4fcc9c9b57c066f"} err="failed to get container status \"abc3e13a6d773001b3bd58449bbbcf5570eaf4a6c943b999d4fcc9c9b57c066f\": rpc error: code = NotFound desc = could not find container \"abc3e13a6d773001b3bd58449bbbcf5570eaf4a6c943b999d4fcc9c9b57c066f\": container with ID starting with abc3e13a6d773001b3bd58449bbbcf5570eaf4a6c943b999d4fcc9c9b57c066f not found: ID does not exist"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.822602 4798 scope.go:117] "RemoveContainer" containerID="81b3e9df75092a3efda3ab28bcc5bdcc08af56888948c5568fc5e4c04810cc9f"
Oct 11 04:12:06 crc kubenswrapper[4798]: E1011 04:12:06.823052 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81b3e9df75092a3efda3ab28bcc5bdcc08af56888948c5568fc5e4c04810cc9f\": container with ID starting with 81b3e9df75092a3efda3ab28bcc5bdcc08af56888948c5568fc5e4c04810cc9f not found: ID does not exist" containerID="81b3e9df75092a3efda3ab28bcc5bdcc08af56888948c5568fc5e4c04810cc9f"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.823075 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81b3e9df75092a3efda3ab28bcc5bdcc08af56888948c5568fc5e4c04810cc9f"} err="failed to get container status \"81b3e9df75092a3efda3ab28bcc5bdcc08af56888948c5568fc5e4c04810cc9f\": rpc error: code = NotFound desc = could not find container \"81b3e9df75092a3efda3ab28bcc5bdcc08af56888948c5568fc5e4c04810cc9f\": container with ID starting with 81b3e9df75092a3efda3ab28bcc5bdcc08af56888948c5568fc5e4c04810cc9f not found: ID does not exist"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.823088 4798 scope.go:117] "RemoveContainer" containerID="71a71593f08fe793927ee7efa2ee7dde883b2df0ced200ba8c2bb87304a7e14b"
Oct 11 04:12:06 crc kubenswrapper[4798]: E1011 04:12:06.823510 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71a71593f08fe793927ee7efa2ee7dde883b2df0ced200ba8c2bb87304a7e14b\": container with ID starting with 71a71593f08fe793927ee7efa2ee7dde883b2df0ced200ba8c2bb87304a7e14b not found: ID does not exist" containerID="71a71593f08fe793927ee7efa2ee7dde883b2df0ced200ba8c2bb87304a7e14b"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.823531 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71a71593f08fe793927ee7efa2ee7dde883b2df0ced200ba8c2bb87304a7e14b"} err="failed to get container status \"71a71593f08fe793927ee7efa2ee7dde883b2df0ced200ba8c2bb87304a7e14b\": rpc error: code = NotFound desc = could not find container \"71a71593f08fe793927ee7efa2ee7dde883b2df0ced200ba8c2bb87304a7e14b\": container with ID starting with 71a71593f08fe793927ee7efa2ee7dde883b2df0ced200ba8c2bb87304a7e14b not found: ID does not exist"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.917625 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlwh2\" (UniqueName: \"kubernetes.io/projected/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-kube-api-access-mlwh2\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.917699 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-scripts\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.917722 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-run-httpd\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.918377 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-run-httpd\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.918668 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-config-data\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.918699 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.918733 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.918750 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-log-httpd\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.919147 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-log-httpd\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.923301 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.924849 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.924946 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-config-data\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.925361 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-scripts\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:06 crc kubenswrapper[4798]: I1011 04:12:06.943278 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlwh2\" (UniqueName: \"kubernetes.io/projected/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-kube-api-access-mlwh2\") pod \"ceilometer-0\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") " pod="openstack/ceilometer-0"
Oct 11 04:12:07 crc kubenswrapper[4798]: I1011 04:12:07.104934 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 04:12:07 crc kubenswrapper[4798]: I1011 04:12:07.436517 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0020f7ba-0c70-4bcf-90a8-68729a98025c" path="/var/lib/kubelet/pods/0020f7ba-0c70-4bcf-90a8-68729a98025c/volumes"
Oct 11 04:12:07 crc kubenswrapper[4798]: I1011 04:12:07.575211 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:12:07 crc kubenswrapper[4798]: W1011 04:12:07.582834 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddbe7cb03_b84f_4e72_a995_8d364b9d53ea.slice/crio-7fd3d69349d9c24f3203595a2e3fcd997f21d1feb62881b16c6962635a9bdbc8 WatchSource:0}: Error finding container 7fd3d69349d9c24f3203595a2e3fcd997f21d1feb62881b16c6962635a9bdbc8: Status 404 returned error can't find the container with id 7fd3d69349d9c24f3203595a2e3fcd997f21d1feb62881b16c6962635a9bdbc8
Oct 11 04:12:07 crc kubenswrapper[4798]: I1011 04:12:07.620420 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dbe7cb03-b84f-4e72-a995-8d364b9d53ea","Type":"ContainerStarted","Data":"7fd3d69349d9c24f3203595a2e3fcd997f21d1feb62881b16c6962635a9bdbc8"}
Oct 11 04:12:08 crc kubenswrapper[4798]: I1011 04:12:08.653266 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dbe7cb03-b84f-4e72-a995-8d364b9d53ea","Type":"ContainerStarted","Data":"37b3342c810f4b45d07c05ba69dc2f6742bb593adfbe062d5979768c9f30129b"}
Oct 11 04:12:08 crc kubenswrapper[4798]: I1011 04:12:08.662567 4798 generic.go:334] "Generic (PLEG): container finished" podID="c7e46251-4b37-40c6-adbc-877857e4442b" containerID="5b05062cc436b6c50869a63e442b96759ab9057beac4469a2039e02cb3aeddd4" exitCode=0
Oct 11 04:12:08 crc kubenswrapper[4798]: I1011 04:12:08.662641 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-btwq2" event={"ID":"c7e46251-4b37-40c6-adbc-877857e4442b","Type":"ContainerDied","Data":"5b05062cc436b6c50869a63e442b96759ab9057beac4469a2039e02cb3aeddd4"}
Oct 11 04:12:09 crc kubenswrapper[4798]: I1011 04:12:09.671185 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dbe7cb03-b84f-4e72-a995-8d364b9d53ea","Type":"ContainerStarted","Data":"2c14c2de63d9fcb167f6decea44733514f16c80c8b09924307acc28744a7c953"}
Oct 11 04:12:09 crc kubenswrapper[4798]: I1011 04:12:09.671641 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dbe7cb03-b84f-4e72-a995-8d364b9d53ea","Type":"ContainerStarted","Data":"e8201f0bc7bc38e4ca139682da4e743f7d3b9187fcda29f1f26759b5d46e51eb"}
Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.026950 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-btwq2"
Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.181218 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-scripts\") pod \"c7e46251-4b37-40c6-adbc-877857e4442b\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") "
Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.181378 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-config-data\") pod \"c7e46251-4b37-40c6-adbc-877857e4442b\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") "
Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.181461 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tc445\" (UniqueName: \"kubernetes.io/projected/c7e46251-4b37-40c6-adbc-877857e4442b-kube-api-access-tc445\") pod \"c7e46251-4b37-40c6-adbc-877857e4442b\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") "
Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.181662 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7e46251-4b37-40c6-adbc-877857e4442b-etc-machine-id\") pod \"c7e46251-4b37-40c6-adbc-877857e4442b\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") "
Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.181701 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-db-sync-config-data\") pod \"c7e46251-4b37-40c6-adbc-877857e4442b\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") "
Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.181738 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-combined-ca-bundle\") pod \"c7e46251-4b37-40c6-adbc-877857e4442b\" (UID: \"c7e46251-4b37-40c6-adbc-877857e4442b\") "
Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.182298 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c7e46251-4b37-40c6-adbc-877857e4442b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c7e46251-4b37-40c6-adbc-877857e4442b" (UID: "c7e46251-4b37-40c6-adbc-877857e4442b"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.182991 4798 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7e46251-4b37-40c6-adbc-877857e4442b-etc-machine-id\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.189160 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-scripts" (OuterVolumeSpecName: "scripts") pod "c7e46251-4b37-40c6-adbc-877857e4442b" (UID: "c7e46251-4b37-40c6-adbc-877857e4442b"). InnerVolumeSpecName "scripts".
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.190205 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c7e46251-4b37-40c6-adbc-877857e4442b" (UID: "c7e46251-4b37-40c6-adbc-877857e4442b"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.190638 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7e46251-4b37-40c6-adbc-877857e4442b-kube-api-access-tc445" (OuterVolumeSpecName: "kube-api-access-tc445") pod "c7e46251-4b37-40c6-adbc-877857e4442b" (UID: "c7e46251-4b37-40c6-adbc-877857e4442b"). InnerVolumeSpecName "kube-api-access-tc445". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.242805 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c7e46251-4b37-40c6-adbc-877857e4442b" (UID: "c7e46251-4b37-40c6-adbc-877857e4442b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.281483 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-config-data" (OuterVolumeSpecName: "config-data") pod "c7e46251-4b37-40c6-adbc-877857e4442b" (UID: "c7e46251-4b37-40c6-adbc-877857e4442b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.284558 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.284586 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.284599 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tc445\" (UniqueName: \"kubernetes.io/projected/c7e46251-4b37-40c6-adbc-877857e4442b-kube-api-access-tc445\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.284610 4798 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.284619 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7e46251-4b37-40c6-adbc-877857e4442b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.683793 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-btwq2" event={"ID":"c7e46251-4b37-40c6-adbc-877857e4442b","Type":"ContainerDied","Data":"077b33da092fa80665f7057ef42f321a5410358cd1f618dee0127da83b588155"} Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.683846 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="077b33da092fa80665f7057ef42f321a5410358cd1f618dee0127da83b588155" Oct 11 04:12:10 crc kubenswrapper[4798]: I1011 04:12:10.683899 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-btwq2" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.002952 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 04:12:11 crc kubenswrapper[4798]: E1011 04:12:11.003701 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7e46251-4b37-40c6-adbc-877857e4442b" containerName="cinder-db-sync" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.003719 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7e46251-4b37-40c6-adbc-877857e4442b" containerName="cinder-db-sync" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.003873 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7e46251-4b37-40c6-adbc-877857e4442b" containerName="cinder-db-sync" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.004808 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.009423 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-rwc92" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.017832 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.017907 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.018095 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.018727 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.063482 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v"] Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.065373 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.102558 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v"] Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.202660 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-ovsdbserver-nb\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.202717 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.202737 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2h6qq\" (UniqueName: \"kubernetes.io/projected/bc36d523-b6e0-4b0e-a50f-45a173f728b1-kube-api-access-2h6qq\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.202759 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.202797 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-config-data\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.202825 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" 
(UniqueName: \"kubernetes.io/host-path/bc36d523-b6e0-4b0e-a50f-45a173f728b1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.202862 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-config\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.202895 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsljj\" (UniqueName: \"kubernetes.io/projected/6008f2a1-8398-4d56-a3a2-c77e8923a800-kube-api-access-vsljj\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.202910 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-scripts\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.202929 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-ovsdbserver-sb\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.202963 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-dns-svc\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.276725 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.280660 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.282706 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.292919 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.304671 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-dns-svc\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.304738 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-ovsdbserver-nb\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.304768 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.304791 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2h6qq\" (UniqueName: \"kubernetes.io/projected/bc36d523-b6e0-4b0e-a50f-45a173f728b1-kube-api-access-2h6qq\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.304815 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.304851 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-config-data\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.304870 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bc36d523-b6e0-4b0e-a50f-45a173f728b1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.304906 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-config\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.304938 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsljj\" (UniqueName: 
\"kubernetes.io/projected/6008f2a1-8398-4d56-a3a2-c77e8923a800-kube-api-access-vsljj\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.304955 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-scripts\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.304977 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-ovsdbserver-sb\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.306000 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-ovsdbserver-sb\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.307104 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-dns-svc\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.307617 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-ovsdbserver-nb\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.321838 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bc36d523-b6e0-4b0e-a50f-45a173f728b1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.323248 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-config\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.328591 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.332986 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 
04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.340448 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-config-data\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.359064 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2h6qq\" (UniqueName: \"kubernetes.io/projected/bc36d523-b6e0-4b0e-a50f-45a173f728b1-kube-api-access-2h6qq\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.366684 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-scripts\") pod \"cinder-scheduler-0\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.392425 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsljj\" (UniqueName: \"kubernetes.io/projected/6008f2a1-8398-4d56-a3a2-c77e8923a800-kube-api-access-vsljj\") pod \"dnsmasq-dns-5f7f9f7cbf-rnw8v\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.411339 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-config-data-custom\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.411416 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sljn6\" (UniqueName: \"kubernetes.io/projected/1969fe5d-5381-4726-b380-6e3e89e3c68b-kube-api-access-sljn6\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.411775 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-scripts\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.411804 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.411843 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1969fe5d-5381-4726-b380-6e3e89e3c68b-logs\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.411950 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-config-data\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.412030 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1969fe5d-5381-4726-b380-6e3e89e3c68b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.464889 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.513132 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-scripts\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.513195 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.513224 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1969fe5d-5381-4726-b380-6e3e89e3c68b-logs\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.513283 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-config-data\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.513343 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1969fe5d-5381-4726-b380-6e3e89e3c68b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.513540 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-config-data-custom\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.513584 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sljn6\" (UniqueName: \"kubernetes.io/projected/1969fe5d-5381-4726-b380-6e3e89e3c68b-kube-api-access-sljn6\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.515145 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1969fe5d-5381-4726-b380-6e3e89e3c68b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " 
pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.515731 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1969fe5d-5381-4726-b380-6e3e89e3c68b-logs\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.518806 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-scripts\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.522445 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-config-data\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.524734 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.524992 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-config-data-custom\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.531556 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sljn6\" (UniqueName: \"kubernetes.io/projected/1969fe5d-5381-4726-b380-6e3e89e3c68b-kube-api-access-sljn6\") pod \"cinder-api-0\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.645944 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.705929 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dbe7cb03-b84f-4e72-a995-8d364b9d53ea","Type":"ContainerStarted","Data":"0db8b307faf50ea0455a970691914b8ddb9749ba23ecdc8600559fc9d5367090"} Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.706199 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.709459 4798 generic.go:334] "Generic (PLEG): container finished" podID="1afeba46-c2cc-455e-ab80-56f553c9e4cb" containerID="26420071971f8dbe0fb85a3a7c12df46a1f5fb44fb1420e0ed0622a421d484da" exitCode=0 Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.709498 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-f5wn5" event={"ID":"1afeba46-c2cc-455e-ab80-56f553c9e4cb","Type":"ContainerDied","Data":"26420071971f8dbe0fb85a3a7c12df46a1f5fb44fb1420e0ed0622a421d484da"} Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.732237 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.487383386 podStartE2EDuration="5.732219647s" podCreationTimestamp="2025-10-11 04:12:06 +0000 UTC" firstStartedPulling="2025-10-11 04:12:07.586774658 +0000 UTC m=+1022.923064344" lastFinishedPulling="2025-10-11 04:12:10.831610879 +0000 UTC m=+1026.167900605" observedRunningTime="2025-10-11 04:12:11.724927163 +0000 UTC m=+1027.061216849" watchObservedRunningTime="2025-10-11 04:12:11.732219647 +0000 UTC m=+1027.068509333" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.782880 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Oct 11 04:12:11 crc kubenswrapper[4798]: I1011 04:12:11.982553 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v"] Oct 11 04:12:12 crc kubenswrapper[4798]: W1011 04:12:12.000620 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6008f2a1_8398_4d56_a3a2_c77e8923a800.slice/crio-0db7c1364564c0ab358a83e48a84695d4dcb7cf13d9e568aa0b27a5027207262 WatchSource:0}: Error finding container 0db7c1364564c0ab358a83e48a84695d4dcb7cf13d9e568aa0b27a5027207262: Status 404 returned error can't find the container with id 0db7c1364564c0ab358a83e48a84695d4dcb7cf13d9e568aa0b27a5027207262 Oct 11 04:12:12 crc kubenswrapper[4798]: I1011 04:12:12.006813 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 04:12:12 crc kubenswrapper[4798]: I1011 04:12:12.350569 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 11 04:12:12 crc kubenswrapper[4798]: W1011 04:12:12.366127 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1969fe5d_5381_4726_b380_6e3e89e3c68b.slice/crio-23e82288189ca98e3d65ce5bc9db392dbef1e1acb673e270f015c1495f339086 WatchSource:0}: Error finding container 23e82288189ca98e3d65ce5bc9db392dbef1e1acb673e270f015c1495f339086: Status 404 returned error can't find the container with id 23e82288189ca98e3d65ce5bc9db392dbef1e1acb673e270f015c1495f339086 Oct 11 04:12:12 crc kubenswrapper[4798]: I1011 04:12:12.477270 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7466b4ffd-mfb97" Oct 11 04:12:12 crc kubenswrapper[4798]: I1011 04:12:12.551850 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-68596b54cd-66kzb"] Oct 11 04:12:12 crc kubenswrapper[4798]: I1011 04:12:12.556093 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-68596b54cd-66kzb" podUID="a5162cce-7f28-490f-8a4d-f1d10824cafa" containerName="barbican-api-log" containerID="cri-o://5d5f151c738d87a7b83579cfc83f1e6f46d64e19e01dae21a068939d62323028" gracePeriod=30 Oct 11 04:12:12 crc kubenswrapper[4798]: I1011 04:12:12.556691 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-68596b54cd-66kzb" podUID="a5162cce-7f28-490f-8a4d-f1d10824cafa" containerName="barbican-api" containerID="cri-o://a669bc49245b6310c09180cf470126e8dade49e6594de28edd1f013521855cf3" gracePeriod=30 Oct 11 04:12:12 crc kubenswrapper[4798]: I1011 04:12:12.721792 4798 generic.go:334] "Generic (PLEG): container finished" podID="6008f2a1-8398-4d56-a3a2-c77e8923a800" containerID="d6873293387f7a0b478188e11689e927740161c01463ef54f77a859fb3a91fa9" exitCode=0 Oct 11 04:12:12 crc kubenswrapper[4798]: I1011 04:12:12.721868 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" event={"ID":"6008f2a1-8398-4d56-a3a2-c77e8923a800","Type":"ContainerDied","Data":"d6873293387f7a0b478188e11689e927740161c01463ef54f77a859fb3a91fa9"} Oct 11 04:12:12 crc kubenswrapper[4798]: I1011 04:12:12.721898 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" 
event={"ID":"6008f2a1-8398-4d56-a3a2-c77e8923a800","Type":"ContainerStarted","Data":"0db7c1364564c0ab358a83e48a84695d4dcb7cf13d9e568aa0b27a5027207262"} Oct 11 04:12:12 crc kubenswrapper[4798]: I1011 04:12:12.727775 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1969fe5d-5381-4726-b380-6e3e89e3c68b","Type":"ContainerStarted","Data":"23e82288189ca98e3d65ce5bc9db392dbef1e1acb673e270f015c1495f339086"} Oct 11 04:12:12 crc kubenswrapper[4798]: I1011 04:12:12.730077 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bc36d523-b6e0-4b0e-a50f-45a173f728b1","Type":"ContainerStarted","Data":"2031c713752bbc3430ed3c517f44efa7bae2dc893a9cd6f143938676fe5d5cce"} Oct 11 04:12:12 crc kubenswrapper[4798]: I1011 04:12:12.744209 4798 generic.go:334] "Generic (PLEG): container finished" podID="a5162cce-7f28-490f-8a4d-f1d10824cafa" containerID="5d5f151c738d87a7b83579cfc83f1e6f46d64e19e01dae21a068939d62323028" exitCode=143 Oct 11 04:12:12 crc kubenswrapper[4798]: I1011 04:12:12.744310 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68596b54cd-66kzb" event={"ID":"a5162cce-7f28-490f-8a4d-f1d10824cafa","Type":"ContainerDied","Data":"5d5f151c738d87a7b83579cfc83f1e6f46d64e19e01dae21a068939d62323028"} Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.202749 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-f5wn5" Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.258718 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.267905 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1afeba46-c2cc-455e-ab80-56f553c9e4cb-config\") pod \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\" (UID: \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\") " Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.268041 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psdsj\" (UniqueName: \"kubernetes.io/projected/1afeba46-c2cc-455e-ab80-56f553c9e4cb-kube-api-access-psdsj\") pod \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\" (UID: \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\") " Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.268090 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1afeba46-c2cc-455e-ab80-56f553c9e4cb-combined-ca-bundle\") pod \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\" (UID: \"1afeba46-c2cc-455e-ab80-56f553c9e4cb\") " Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.275332 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1afeba46-c2cc-455e-ab80-56f553c9e4cb-kube-api-access-psdsj" (OuterVolumeSpecName: "kube-api-access-psdsj") pod "1afeba46-c2cc-455e-ab80-56f553c9e4cb" (UID: "1afeba46-c2cc-455e-ab80-56f553c9e4cb"). InnerVolumeSpecName "kube-api-access-psdsj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.309861 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1afeba46-c2cc-455e-ab80-56f553c9e4cb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1afeba46-c2cc-455e-ab80-56f553c9e4cb" (UID: "1afeba46-c2cc-455e-ab80-56f553c9e4cb"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.315072 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1afeba46-c2cc-455e-ab80-56f553c9e4cb-config" (OuterVolumeSpecName: "config") pod "1afeba46-c2cc-455e-ab80-56f553c9e4cb" (UID: "1afeba46-c2cc-455e-ab80-56f553c9e4cb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.371157 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/1afeba46-c2cc-455e-ab80-56f553c9e4cb-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.371229 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psdsj\" (UniqueName: \"kubernetes.io/projected/1afeba46-c2cc-455e-ab80-56f553c9e4cb-kube-api-access-psdsj\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.371243 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1afeba46-c2cc-455e-ab80-56f553c9e4cb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.758787 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" event={"ID":"6008f2a1-8398-4d56-a3a2-c77e8923a800","Type":"ContainerStarted","Data":"e05a865f2d237639713abc42cfa5f8b52aeb1a31f0fc43cf02f7077997ae7450"} Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.761968 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.766236 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1969fe5d-5381-4726-b380-6e3e89e3c68b","Type":"ContainerStarted","Data":"8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb"} Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.790006 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" podStartSLOduration=2.789991539 podStartE2EDuration="2.789991539s" podCreationTimestamp="2025-10-11 04:12:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:12:13.78794309 +0000 UTC m=+1029.124232766" watchObservedRunningTime="2025-10-11 04:12:13.789991539 +0000 UTC m=+1029.126281225" Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.790816 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-f5wn5" event={"ID":"1afeba46-c2cc-455e-ab80-56f553c9e4cb","Type":"ContainerDied","Data":"7d8369d5b8f2bd49e7d5814f6b9e1a896c5c2e00b1bb385b16320a0452556933"} Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.791836 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7d8369d5b8f2bd49e7d5814f6b9e1a896c5c2e00b1bb385b16320a0452556933" Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.790888 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-f5wn5" Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.796196 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bc36d523-b6e0-4b0e-a50f-45a173f728b1","Type":"ContainerStarted","Data":"2539e9a6d5c9ddd24e80380f4158dca5f0fa866b3cb69f4606f662d1366fcce4"} Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.936698 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v"] Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.983921 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-zfln8"] Oct 11 04:12:13 crc kubenswrapper[4798]: E1011 04:12:13.985063 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1afeba46-c2cc-455e-ab80-56f553c9e4cb" containerName="neutron-db-sync" Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.985137 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="1afeba46-c2cc-455e-ab80-56f553c9e4cb" containerName="neutron-db-sync" Oct 11 04:12:13 crc kubenswrapper[4798]: I1011 04:12:13.986343 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="1afeba46-c2cc-455e-ab80-56f553c9e4cb" containerName="neutron-db-sync" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.000076 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.007683 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-zfln8"] Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.059567 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-766995c844-7nlzq"] Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.061201 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.067706 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.068534 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.069221 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-4q929" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.069259 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.078075 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-766995c844-7nlzq"] Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.198367 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-combined-ca-bundle\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.198478 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-config\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.198508 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpth7\" (UniqueName: \"kubernetes.io/projected/eb2fdf9e-64f1-40bb-b964-73bbe357085c-kube-api-access-xpth7\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.198559 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-ovsdbserver-nb\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.198577 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l77w8\" (UniqueName: \"kubernetes.io/projected/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-kube-api-access-l77w8\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.198613 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-config\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.198651 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-dns-svc\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.198671 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-httpd-config\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.198709 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-ovsdbserver-sb\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.198750 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-ovndb-tls-certs\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.301416 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-combined-ca-bundle\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.301482 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-config\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.301512 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpth7\" (UniqueName: \"kubernetes.io/projected/eb2fdf9e-64f1-40bb-b964-73bbe357085c-kube-api-access-xpth7\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.301563 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-ovsdbserver-nb\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.301584 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l77w8\" (UniqueName: \"kubernetes.io/projected/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-kube-api-access-l77w8\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.301623 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-config\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.301660 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-dns-svc\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.301675 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-httpd-config\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.301692 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-ovsdbserver-sb\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.301720 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-ovndb-tls-certs\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.309657 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-config\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.310194 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-ovsdbserver-nb\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.313277 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-ovsdbserver-sb\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.313420 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-dns-svc\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.321289 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-ovndb-tls-certs\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " 
pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.321406 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-config\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.322228 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-combined-ca-bundle\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.327038 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-httpd-config\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.340311 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l77w8\" (UniqueName: \"kubernetes.io/projected/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-kube-api-access-l77w8\") pod \"dnsmasq-dns-58db5546cc-zfln8\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.366172 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpth7\" (UniqueName: \"kubernetes.io/projected/eb2fdf9e-64f1-40bb-b964-73bbe357085c-kube-api-access-xpth7\") pod \"neutron-766995c844-7nlzq\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.385814 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.400117 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.811837 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1969fe5d-5381-4726-b380-6e3e89e3c68b","Type":"ContainerStarted","Data":"9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5"} Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.811917 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="1969fe5d-5381-4726-b380-6e3e89e3c68b" containerName="cinder-api-log" containerID="cri-o://8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb" gracePeriod=30 Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.812378 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.812431 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="1969fe5d-5381-4726-b380-6e3e89e3c68b" containerName="cinder-api" containerID="cri-o://9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5" gracePeriod=30 Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.826822 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bc36d523-b6e0-4b0e-a50f-45a173f728b1","Type":"ContainerStarted","Data":"d623ac4cf836ee3767e600e66a3eb9468b636b4ddd98e67486ed4bba6dbac1f2"} Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.846002 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.845973427 podStartE2EDuration="3.845973427s" podCreationTimestamp="2025-10-11 04:12:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:12:14.837024023 +0000 UTC m=+1030.173313709" watchObservedRunningTime="2025-10-11 04:12:14.845973427 +0000 UTC m=+1030.182263123" Oct 11 04:12:14 crc kubenswrapper[4798]: I1011 04:12:14.869282 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.9949167709999998 podStartE2EDuration="4.869265672s" podCreationTimestamp="2025-10-11 04:12:10 +0000 UTC" firstStartedPulling="2025-10-11 04:12:11.988403564 +0000 UTC m=+1027.324693250" lastFinishedPulling="2025-10-11 04:12:12.862752465 +0000 UTC m=+1028.199042151" observedRunningTime="2025-10-11 04:12:14.865896052 +0000 UTC m=+1030.202185738" watchObservedRunningTime="2025-10-11 04:12:14.869265672 +0000 UTC m=+1030.205555358" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.059421 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-zfln8"] Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.256659 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-766995c844-7nlzq"] Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.517031 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.625921 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-combined-ca-bundle\") pod \"1969fe5d-5381-4726-b380-6e3e89e3c68b\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.626415 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-config-data-custom\") pod \"1969fe5d-5381-4726-b380-6e3e89e3c68b\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.626476 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sljn6\" (UniqueName: \"kubernetes.io/projected/1969fe5d-5381-4726-b380-6e3e89e3c68b-kube-api-access-sljn6\") pod \"1969fe5d-5381-4726-b380-6e3e89e3c68b\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.626549 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-config-data\") pod \"1969fe5d-5381-4726-b380-6e3e89e3c68b\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.626591 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1969fe5d-5381-4726-b380-6e3e89e3c68b-logs\") pod \"1969fe5d-5381-4726-b380-6e3e89e3c68b\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.626741 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1969fe5d-5381-4726-b380-6e3e89e3c68b-etc-machine-id\") pod \"1969fe5d-5381-4726-b380-6e3e89e3c68b\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.626765 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-scripts\") pod \"1969fe5d-5381-4726-b380-6e3e89e3c68b\" (UID: \"1969fe5d-5381-4726-b380-6e3e89e3c68b\") " Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.627501 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1969fe5d-5381-4726-b380-6e3e89e3c68b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1969fe5d-5381-4726-b380-6e3e89e3c68b" (UID: "1969fe5d-5381-4726-b380-6e3e89e3c68b"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.631018 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1969fe5d-5381-4726-b380-6e3e89e3c68b-logs" (OuterVolumeSpecName: "logs") pod "1969fe5d-5381-4726-b380-6e3e89e3c68b" (UID: "1969fe5d-5381-4726-b380-6e3e89e3c68b"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.631809 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1969fe5d-5381-4726-b380-6e3e89e3c68b-kube-api-access-sljn6" (OuterVolumeSpecName: "kube-api-access-sljn6") pod "1969fe5d-5381-4726-b380-6e3e89e3c68b" (UID: "1969fe5d-5381-4726-b380-6e3e89e3c68b"). InnerVolumeSpecName "kube-api-access-sljn6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.635594 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-scripts" (OuterVolumeSpecName: "scripts") pod "1969fe5d-5381-4726-b380-6e3e89e3c68b" (UID: "1969fe5d-5381-4726-b380-6e3e89e3c68b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.635648 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1969fe5d-5381-4726-b380-6e3e89e3c68b" (UID: "1969fe5d-5381-4726-b380-6e3e89e3c68b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.699984 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1969fe5d-5381-4726-b380-6e3e89e3c68b" (UID: "1969fe5d-5381-4726-b380-6e3e89e3c68b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.730952 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1969fe5d-5381-4726-b380-6e3e89e3c68b-logs\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.731003 4798 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1969fe5d-5381-4726-b380-6e3e89e3c68b-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.731016 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.731029 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.731062 4798 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.731076 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sljn6\" (UniqueName: \"kubernetes.io/projected/1969fe5d-5381-4726-b380-6e3e89e3c68b-kube-api-access-sljn6\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.737121 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded 
for volume "kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-config-data" (OuterVolumeSpecName: "config-data") pod "1969fe5d-5381-4726-b380-6e3e89e3c68b" (UID: "1969fe5d-5381-4726-b380-6e3e89e3c68b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.770699 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-68596b54cd-66kzb" podUID="a5162cce-7f28-490f-8a4d-f1d10824cafa" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.146:9311/healthcheck\": read tcp 10.217.0.2:39474->10.217.0.146:9311: read: connection reset by peer" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.770802 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-68596b54cd-66kzb" podUID="a5162cce-7f28-490f-8a4d-f1d10824cafa" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.146:9311/healthcheck\": read tcp 10.217.0.2:39470->10.217.0.146:9311: read: connection reset by peer" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.832122 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1969fe5d-5381-4726-b380-6e3e89e3c68b-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.858400 4798 generic.go:334] "Generic (PLEG): container finished" podID="c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" containerID="67a08c46e15345ff800acdbe8a08dc3819ebe59e5e99c464d309b6f23c60a671" exitCode=0 Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.858464 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-zfln8" event={"ID":"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78","Type":"ContainerDied","Data":"67a08c46e15345ff800acdbe8a08dc3819ebe59e5e99c464d309b6f23c60a671"} Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.858490 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-zfln8" event={"ID":"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78","Type":"ContainerStarted","Data":"e22fef8e8bf92132ae51a020b929b4b538b983a4c36722ddee2a60b6cb265c28"} Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.879011 4798 generic.go:334] "Generic (PLEG): container finished" podID="1969fe5d-5381-4726-b380-6e3e89e3c68b" containerID="9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5" exitCode=0 Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.879049 4798 generic.go:334] "Generic (PLEG): container finished" podID="1969fe5d-5381-4726-b380-6e3e89e3c68b" containerID="8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb" exitCode=143 Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.879112 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1969fe5d-5381-4726-b380-6e3e89e3c68b","Type":"ContainerDied","Data":"9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5"} Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.879143 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1969fe5d-5381-4726-b380-6e3e89e3c68b","Type":"ContainerDied","Data":"8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb"} Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.879155 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"1969fe5d-5381-4726-b380-6e3e89e3c68b","Type":"ContainerDied","Data":"23e82288189ca98e3d65ce5bc9db392dbef1e1acb673e270f015c1495f339086"} Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.879175 4798 scope.go:117] "RemoveContainer" containerID="9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.879317 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.900291 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-766995c844-7nlzq" event={"ID":"eb2fdf9e-64f1-40bb-b964-73bbe357085c","Type":"ContainerStarted","Data":"30f23467c0fe08eea3dda771f49a3e9eba8c083b26b7234b4d5b9e602e1c2d93"} Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.900378 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-766995c844-7nlzq" event={"ID":"eb2fdf9e-64f1-40bb-b964-73bbe357085c","Type":"ContainerStarted","Data":"0e43d9448071d502c32ec0f573e0ba7c5d257342a98b682d7a15d841faaf5574"} Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.906514 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" event={"ID":"141c65d6-e8c4-4ae3-be7b-7adf75193efc","Type":"ContainerStarted","Data":"0c17db4226496fdf70cb9119a669bd9b5b1c703445e8d507729ba11954b271a4"} Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.906541 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" event={"ID":"141c65d6-e8c4-4ae3-be7b-7adf75193efc","Type":"ContainerStarted","Data":"b0f159328b9e4158a5aa617c6ccb3b6c364312efe2e0b863e3393649312922a6"} Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.907432 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" podUID="6008f2a1-8398-4d56-a3a2-c77e8923a800" containerName="dnsmasq-dns" containerID="cri-o://e05a865f2d237639713abc42cfa5f8b52aeb1a31f0fc43cf02f7077997ae7450" gracePeriod=10 Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.949381 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6574767f6b-8p24k" podStartSLOduration=3.382077747 podStartE2EDuration="29.947572405s" podCreationTimestamp="2025-10-11 04:11:46 +0000 UTC" firstStartedPulling="2025-10-11 04:11:48.442698093 +0000 UTC m=+1003.778987779" lastFinishedPulling="2025-10-11 04:12:15.008192751 +0000 UTC m=+1030.344482437" observedRunningTime="2025-10-11 04:12:15.930010775 +0000 UTC m=+1031.266300471" watchObservedRunningTime="2025-10-11 04:12:15.947572405 +0000 UTC m=+1031.283862091" Oct 11 04:12:15 crc kubenswrapper[4798]: I1011 04:12:15.964613 4798 scope.go:117] "RemoveContainer" containerID="8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.006556 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.029119 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.029908 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Oct 11 04:12:16 crc kubenswrapper[4798]: E1011 04:12:16.030369 4798 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="1969fe5d-5381-4726-b380-6e3e89e3c68b" containerName="cinder-api" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.030385 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="1969fe5d-5381-4726-b380-6e3e89e3c68b" containerName="cinder-api" Oct 11 04:12:16 crc kubenswrapper[4798]: E1011 04:12:16.030414 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1969fe5d-5381-4726-b380-6e3e89e3c68b" containerName="cinder-api-log" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.030421 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="1969fe5d-5381-4726-b380-6e3e89e3c68b" containerName="cinder-api-log" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.040668 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="1969fe5d-5381-4726-b380-6e3e89e3c68b" containerName="cinder-api" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.040746 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="1969fe5d-5381-4726-b380-6e3e89e3c68b" containerName="cinder-api-log" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.052673 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.103163 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.105732 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.124639 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.136372 4798 scope.go:117] "RemoveContainer" containerID="9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5" Oct 11 04:12:16 crc kubenswrapper[4798]: E1011 04:12:16.150167 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5\": container with ID starting with 9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5 not found: ID does not exist" containerID="9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.150219 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5"} err="failed to get container status \"9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5\": rpc error: code = NotFound desc = could not find container \"9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5\": container with ID starting with 9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5 not found: ID does not exist" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.150262 4798 scope.go:117] "RemoveContainer" containerID="8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.165181 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b454f77b-5618-4f7e-b603-9fec6030c732-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc 
kubenswrapper[4798]: I1011 04:12:16.165260 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b454f77b-5618-4f7e-b603-9fec6030c732-logs\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.165326 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.165352 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-public-tls-certs\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.165382 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.165557 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-config-data-custom\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.165585 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2bt8\" (UniqueName: \"kubernetes.io/projected/b454f77b-5618-4f7e-b603-9fec6030c732-kube-api-access-t2bt8\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.165654 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-scripts\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.165692 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-config-data\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.184491 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 11 04:12:16 crc kubenswrapper[4798]: E1011 04:12:16.184882 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb\": container with ID starting with 8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb not found: ID does not exist" 
containerID="8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.184938 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb"} err="failed to get container status \"8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb\": rpc error: code = NotFound desc = could not find container \"8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb\": container with ID starting with 8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb not found: ID does not exist" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.184967 4798 scope.go:117] "RemoveContainer" containerID="9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.196738 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5"} err="failed to get container status \"9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5\": rpc error: code = NotFound desc = could not find container \"9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5\": container with ID starting with 9d3744cb37ea1d3c48ac547b475f811936eafe9b1b2602e4eaa6556f50bb83e5 not found: ID does not exist" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.196808 4798 scope.go:117] "RemoveContainer" containerID="8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.206040 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb"} err="failed to get container status \"8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb\": rpc error: code = NotFound desc = could not find container \"8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb\": container with ID starting with 8e880984e4e0abf24bb17403955cdc66b1ac83cf5d48ca6d846923e18e9bbfdb not found: ID does not exist" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.271060 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b454f77b-5618-4f7e-b603-9fec6030c732-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.271118 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b454f77b-5618-4f7e-b603-9fec6030c732-logs\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.271176 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.271196 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-public-tls-certs\") pod \"cinder-api-0\" 
(UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.271214 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.271234 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-config-data-custom\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.271257 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2bt8\" (UniqueName: \"kubernetes.io/projected/b454f77b-5618-4f7e-b603-9fec6030c732-kube-api-access-t2bt8\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.271295 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-scripts\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.271311 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-config-data\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.273116 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b454f77b-5618-4f7e-b603-9fec6030c732-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.274104 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b454f77b-5618-4f7e-b603-9fec6030c732-logs\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.279488 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.283828 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-config-data\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.290109 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-internal-tls-certs\") pod \"cinder-api-0\" (UID: 
\"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.299351 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-public-tls-certs\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.299964 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-scripts\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.320185 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b454f77b-5618-4f7e-b603-9fec6030c732-config-data-custom\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.338886 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2bt8\" (UniqueName: \"kubernetes.io/projected/b454f77b-5618-4f7e-b603-9fec6030c732-kube-api-access-t2bt8\") pod \"cinder-api-0\" (UID: \"b454f77b-5618-4f7e-b603-9fec6030c732\") " pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.484037 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.514920 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.597877 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.647453 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.682584 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-config-data\") pod \"a5162cce-7f28-490f-8a4d-f1d10824cafa\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.682756 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-combined-ca-bundle\") pod \"a5162cce-7f28-490f-8a4d-f1d10824cafa\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.682845 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5162cce-7f28-490f-8a4d-f1d10824cafa-logs\") pod \"a5162cce-7f28-490f-8a4d-f1d10824cafa\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.682990 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rp5bm\" (UniqueName: \"kubernetes.io/projected/a5162cce-7f28-490f-8a4d-f1d10824cafa-kube-api-access-rp5bm\") pod \"a5162cce-7f28-490f-8a4d-f1d10824cafa\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.683224 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vsljj\" (UniqueName: \"kubernetes.io/projected/6008f2a1-8398-4d56-a3a2-c77e8923a800-kube-api-access-vsljj\") pod \"6008f2a1-8398-4d56-a3a2-c77e8923a800\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.683277 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-config\") pod \"6008f2a1-8398-4d56-a3a2-c77e8923a800\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.683312 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-ovsdbserver-nb\") pod \"6008f2a1-8398-4d56-a3a2-c77e8923a800\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.683363 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-config-data-custom\") pod \"a5162cce-7f28-490f-8a4d-f1d10824cafa\" (UID: \"a5162cce-7f28-490f-8a4d-f1d10824cafa\") " Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.683422 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-ovsdbserver-sb\") pod \"6008f2a1-8398-4d56-a3a2-c77e8923a800\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.683491 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-dns-svc\") pod \"6008f2a1-8398-4d56-a3a2-c77e8923a800\" (UID: \"6008f2a1-8398-4d56-a3a2-c77e8923a800\") " Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.691645 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5162cce-7f28-490f-8a4d-f1d10824cafa-logs" (OuterVolumeSpecName: "logs") pod "a5162cce-7f28-490f-8a4d-f1d10824cafa" (UID: "a5162cce-7f28-490f-8a4d-f1d10824cafa"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.696876 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5162cce-7f28-490f-8a4d-f1d10824cafa-kube-api-access-rp5bm" (OuterVolumeSpecName: "kube-api-access-rp5bm") pod "a5162cce-7f28-490f-8a4d-f1d10824cafa" (UID: "a5162cce-7f28-490f-8a4d-f1d10824cafa"). InnerVolumeSpecName "kube-api-access-rp5bm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.706965 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6008f2a1-8398-4d56-a3a2-c77e8923a800-kube-api-access-vsljj" (OuterVolumeSpecName: "kube-api-access-vsljj") pod "6008f2a1-8398-4d56-a3a2-c77e8923a800" (UID: "6008f2a1-8398-4d56-a3a2-c77e8923a800"). InnerVolumeSpecName "kube-api-access-vsljj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.715434 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a5162cce-7f28-490f-8a4d-f1d10824cafa" (UID: "a5162cce-7f28-490f-8a4d-f1d10824cafa"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.783554 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6008f2a1-8398-4d56-a3a2-c77e8923a800" (UID: "6008f2a1-8398-4d56-a3a2-c77e8923a800"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.785566 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.785604 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5162cce-7f28-490f-8a4d-f1d10824cafa-logs\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.785613 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rp5bm\" (UniqueName: \"kubernetes.io/projected/a5162cce-7f28-490f-8a4d-f1d10824cafa-kube-api-access-rp5bm\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.785627 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vsljj\" (UniqueName: \"kubernetes.io/projected/6008f2a1-8398-4d56-a3a2-c77e8923a800-kube-api-access-vsljj\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.785636 4798 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.813714 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6008f2a1-8398-4d56-a3a2-c77e8923a800" (UID: "6008f2a1-8398-4d56-a3a2-c77e8923a800"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.818943 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a5162cce-7f28-490f-8a4d-f1d10824cafa" (UID: "a5162cce-7f28-490f-8a4d-f1d10824cafa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.853939 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6008f2a1-8398-4d56-a3a2-c77e8923a800" (UID: "6008f2a1-8398-4d56-a3a2-c77e8923a800"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.859673 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-config-data" (OuterVolumeSpecName: "config-data") pod "a5162cce-7f28-490f-8a4d-f1d10824cafa" (UID: "a5162cce-7f28-490f-8a4d-f1d10824cafa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.860144 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-config" (OuterVolumeSpecName: "config") pod "6008f2a1-8398-4d56-a3a2-c77e8923a800" (UID: "6008f2a1-8398-4d56-a3a2-c77e8923a800"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.891159 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.891209 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.891224 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.891237 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6008f2a1-8398-4d56-a3a2-c77e8923a800-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.891247 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5162cce-7f28-490f-8a4d-f1d10824cafa-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.930567 4798 generic.go:334] "Generic (PLEG): container finished" podID="6008f2a1-8398-4d56-a3a2-c77e8923a800" containerID="e05a865f2d237639713abc42cfa5f8b52aeb1a31f0fc43cf02f7077997ae7450" exitCode=0 Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.930643 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" event={"ID":"6008f2a1-8398-4d56-a3a2-c77e8923a800","Type":"ContainerDied","Data":"e05a865f2d237639713abc42cfa5f8b52aeb1a31f0fc43cf02f7077997ae7450"} Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.930679 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" event={"ID":"6008f2a1-8398-4d56-a3a2-c77e8923a800","Type":"ContainerDied","Data":"0db7c1364564c0ab358a83e48a84695d4dcb7cf13d9e568aa0b27a5027207262"} Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.930702 4798 scope.go:117] "RemoveContainer" containerID="e05a865f2d237639713abc42cfa5f8b52aeb1a31f0fc43cf02f7077997ae7450" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.930829 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.952808 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-766995c844-7nlzq" event={"ID":"eb2fdf9e-64f1-40bb-b964-73bbe357085c","Type":"ContainerStarted","Data":"566ced5a5f4c94b8d17adf7b2ff3d1968e3e9d6042c791cf6d12e5ca23768760"} Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.953692 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.961640 4798 generic.go:334] "Generic (PLEG): container finished" podID="a5162cce-7f28-490f-8a4d-f1d10824cafa" containerID="a669bc49245b6310c09180cf470126e8dade49e6594de28edd1f013521855cf3" exitCode=0 Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.961716 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68596b54cd-66kzb" event={"ID":"a5162cce-7f28-490f-8a4d-f1d10824cafa","Type":"ContainerDied","Data":"a669bc49245b6310c09180cf470126e8dade49e6594de28edd1f013521855cf3"} Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.961739 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68596b54cd-66kzb" event={"ID":"a5162cce-7f28-490f-8a4d-f1d10824cafa","Type":"ContainerDied","Data":"6a8eb0d0d05fe3eb8c85c0c11f11d38285e787dc14bb01e7868229819e59ae2e"} Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.965175 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68596b54cd-66kzb" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.972888 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v"] Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.977695 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-zfln8" event={"ID":"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78","Type":"ContainerStarted","Data":"2e80eb6cff4b8143ea305b056d223b400a392f6ca6eb1d542cba8dd6413cc8c5"} Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.977845 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:16 crc kubenswrapper[4798]: I1011 04:12:16.987539 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f7f9f7cbf-rnw8v"] Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.013014 4798 scope.go:117] "RemoveContainer" containerID="d6873293387f7a0b478188e11689e927740161c01463ef54f77a859fb3a91fa9" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.023562 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-766995c844-7nlzq" podStartSLOduration=3.02354176 podStartE2EDuration="3.02354176s" podCreationTimestamp="2025-10-11 04:12:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:12:16.995773857 +0000 UTC m=+1032.332063543" watchObservedRunningTime="2025-10-11 04:12:17.02354176 +0000 UTC m=+1032.359831446" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.034683 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-68596b54cd-66kzb"] Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.054062 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-68596b54cd-66kzb"] Oct 11 04:12:17 crc 
kubenswrapper[4798]: I1011 04:12:17.065342 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-58db5546cc-zfln8" podStartSLOduration=4.065302867 podStartE2EDuration="4.065302867s" podCreationTimestamp="2025-10-11 04:12:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:12:17.048801833 +0000 UTC m=+1032.385091529" watchObservedRunningTime="2025-10-11 04:12:17.065302867 +0000 UTC m=+1032.401592563" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.083928 4798 scope.go:117] "RemoveContainer" containerID="e05a865f2d237639713abc42cfa5f8b52aeb1a31f0fc43cf02f7077997ae7450" Oct 11 04:12:17 crc kubenswrapper[4798]: E1011 04:12:17.084744 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e05a865f2d237639713abc42cfa5f8b52aeb1a31f0fc43cf02f7077997ae7450\": container with ID starting with e05a865f2d237639713abc42cfa5f8b52aeb1a31f0fc43cf02f7077997ae7450 not found: ID does not exist" containerID="e05a865f2d237639713abc42cfa5f8b52aeb1a31f0fc43cf02f7077997ae7450" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.084788 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e05a865f2d237639713abc42cfa5f8b52aeb1a31f0fc43cf02f7077997ae7450"} err="failed to get container status \"e05a865f2d237639713abc42cfa5f8b52aeb1a31f0fc43cf02f7077997ae7450\": rpc error: code = NotFound desc = could not find container \"e05a865f2d237639713abc42cfa5f8b52aeb1a31f0fc43cf02f7077997ae7450\": container with ID starting with e05a865f2d237639713abc42cfa5f8b52aeb1a31f0fc43cf02f7077997ae7450 not found: ID does not exist" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.084813 4798 scope.go:117] "RemoveContainer" containerID="d6873293387f7a0b478188e11689e927740161c01463ef54f77a859fb3a91fa9" Oct 11 04:12:17 crc kubenswrapper[4798]: E1011 04:12:17.085222 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6873293387f7a0b478188e11689e927740161c01463ef54f77a859fb3a91fa9\": container with ID starting with d6873293387f7a0b478188e11689e927740161c01463ef54f77a859fb3a91fa9 not found: ID does not exist" containerID="d6873293387f7a0b478188e11689e927740161c01463ef54f77a859fb3a91fa9" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.085250 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6873293387f7a0b478188e11689e927740161c01463ef54f77a859fb3a91fa9"} err="failed to get container status \"d6873293387f7a0b478188e11689e927740161c01463ef54f77a859fb3a91fa9\": rpc error: code = NotFound desc = could not find container \"d6873293387f7a0b478188e11689e927740161c01463ef54f77a859fb3a91fa9\": container with ID starting with d6873293387f7a0b478188e11689e927740161c01463ef54f77a859fb3a91fa9 not found: ID does not exist" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.085266 4798 scope.go:117] "RemoveContainer" containerID="a669bc49245b6310c09180cf470126e8dade49e6594de28edd1f013521855cf3" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.129797 4798 scope.go:117] "RemoveContainer" containerID="5d5f151c738d87a7b83579cfc83f1e6f46d64e19e01dae21a068939d62323028" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.160370 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 11 04:12:17 crc 
kubenswrapper[4798]: I1011 04:12:17.162091 4798 scope.go:117] "RemoveContainer" containerID="a669bc49245b6310c09180cf470126e8dade49e6594de28edd1f013521855cf3" Oct 11 04:12:17 crc kubenswrapper[4798]: E1011 04:12:17.162679 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a669bc49245b6310c09180cf470126e8dade49e6594de28edd1f013521855cf3\": container with ID starting with a669bc49245b6310c09180cf470126e8dade49e6594de28edd1f013521855cf3 not found: ID does not exist" containerID="a669bc49245b6310c09180cf470126e8dade49e6594de28edd1f013521855cf3" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.162767 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a669bc49245b6310c09180cf470126e8dade49e6594de28edd1f013521855cf3"} err="failed to get container status \"a669bc49245b6310c09180cf470126e8dade49e6594de28edd1f013521855cf3\": rpc error: code = NotFound desc = could not find container \"a669bc49245b6310c09180cf470126e8dade49e6594de28edd1f013521855cf3\": container with ID starting with a669bc49245b6310c09180cf470126e8dade49e6594de28edd1f013521855cf3 not found: ID does not exist" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.162836 4798 scope.go:117] "RemoveContainer" containerID="5d5f151c738d87a7b83579cfc83f1e6f46d64e19e01dae21a068939d62323028" Oct 11 04:12:17 crc kubenswrapper[4798]: E1011 04:12:17.163368 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d5f151c738d87a7b83579cfc83f1e6f46d64e19e01dae21a068939d62323028\": container with ID starting with 5d5f151c738d87a7b83579cfc83f1e6f46d64e19e01dae21a068939d62323028 not found: ID does not exist" containerID="5d5f151c738d87a7b83579cfc83f1e6f46d64e19e01dae21a068939d62323028" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.163483 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d5f151c738d87a7b83579cfc83f1e6f46d64e19e01dae21a068939d62323028"} err="failed to get container status \"5d5f151c738d87a7b83579cfc83f1e6f46d64e19e01dae21a068939d62323028\": rpc error: code = NotFound desc = could not find container \"5d5f151c738d87a7b83579cfc83f1e6f46d64e19e01dae21a068939d62323028\": container with ID starting with 5d5f151c738d87a7b83579cfc83f1e6f46d64e19e01dae21a068939d62323028 not found: ID does not exist" Oct 11 04:12:17 crc kubenswrapper[4798]: W1011 04:12:17.174870 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb454f77b_5618_4f7e_b603_9fec6030c732.slice/crio-dc2e3c897404f8bae8a29616703374c4eb6267a160e49be16512c074b1a043c8 WatchSource:0}: Error finding container dc2e3c897404f8bae8a29616703374c4eb6267a160e49be16512c074b1a043c8: Status 404 returned error can't find the container with id dc2e3c897404f8bae8a29616703374c4eb6267a160e49be16512c074b1a043c8 Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.416198 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5744f55857-rnj42"] Oct 11 04:12:17 crc kubenswrapper[4798]: E1011 04:12:17.417991 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5162cce-7f28-490f-8a4d-f1d10824cafa" containerName="barbican-api" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.418019 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5162cce-7f28-490f-8a4d-f1d10824cafa" containerName="barbican-api" Oct 11 
Oct 11 04:12:17 crc kubenswrapper[4798]: E1011 04:12:17.418047 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6008f2a1-8398-4d56-a3a2-c77e8923a800" containerName="init"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.418055 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="6008f2a1-8398-4d56-a3a2-c77e8923a800" containerName="init"
Oct 11 04:12:17 crc kubenswrapper[4798]: E1011 04:12:17.418095 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5162cce-7f28-490f-8a4d-f1d10824cafa" containerName="barbican-api-log"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.418102 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5162cce-7f28-490f-8a4d-f1d10824cafa" containerName="barbican-api-log"
Oct 11 04:12:17 crc kubenswrapper[4798]: E1011 04:12:17.418115 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6008f2a1-8398-4d56-a3a2-c77e8923a800" containerName="dnsmasq-dns"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.418122 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="6008f2a1-8398-4d56-a3a2-c77e8923a800" containerName="dnsmasq-dns"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.418343 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="6008f2a1-8398-4d56-a3a2-c77e8923a800" containerName="dnsmasq-dns"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.418365 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5162cce-7f28-490f-8a4d-f1d10824cafa" containerName="barbican-api"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.418412 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5162cce-7f28-490f-8a4d-f1d10824cafa" containerName="barbican-api-log"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.419525 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5744f55857-rnj42"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.424874 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.424934 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.445813 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1969fe5d-5381-4726-b380-6e3e89e3c68b" path="/var/lib/kubelet/pods/1969fe5d-5381-4726-b380-6e3e89e3c68b/volumes"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.446868 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6008f2a1-8398-4d56-a3a2-c77e8923a800" path="/var/lib/kubelet/pods/6008f2a1-8398-4d56-a3a2-c77e8923a800/volumes"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.447656 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5162cce-7f28-490f-8a4d-f1d10824cafa" path="/var/lib/kubelet/pods/a5162cce-7f28-490f-8a4d-f1d10824cafa/volumes"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.466115 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5744f55857-rnj42"]
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.505576 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fz8xj\" (UniqueName: \"kubernetes.io/projected/985c3452-9836-40f4-8b42-8082b04ffba3-kube-api-access-fz8xj\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.505631 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-httpd-config\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.505664 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-combined-ca-bundle\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.505783 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-public-tls-certs\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42"
Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.505835 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-ovndb-tls-certs\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42"
\"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-config\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.505872 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-internal-tls-certs\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.536594 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-bf585fdff-grnph" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.608130 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-httpd-config\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.608179 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-combined-ca-bundle\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.608293 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-public-tls-certs\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.608318 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-ovndb-tls-certs\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.608336 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-config\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.609866 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-internal-tls-certs\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.609967 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fz8xj\" (UniqueName: \"kubernetes.io/projected/985c3452-9836-40f4-8b42-8082b04ffba3-kube-api-access-fz8xj\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.615922 4798 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-combined-ca-bundle\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.616844 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-ovndb-tls-certs\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.620605 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-config\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.626050 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-internal-tls-certs\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.631972 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-public-tls-certs\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.632429 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/985c3452-9836-40f4-8b42-8082b04ffba3-httpd-config\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.636713 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fz8xj\" (UniqueName: \"kubernetes.io/projected/985c3452-9836-40f4-8b42-8082b04ffba3-kube-api-access-fz8xj\") pod \"neutron-5744f55857-rnj42\" (UID: \"985c3452-9836-40f4-8b42-8082b04ffba3\") " pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:17 crc kubenswrapper[4798]: I1011 04:12:17.749998 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:18 crc kubenswrapper[4798]: I1011 04:12:18.002976 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b454f77b-5618-4f7e-b603-9fec6030c732","Type":"ContainerStarted","Data":"4e253739496f1276501116eb887ceafe3ddaeb7296c6a000a43c7c8542ac024d"} Oct 11 04:12:18 crc kubenswrapper[4798]: I1011 04:12:18.003474 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b454f77b-5618-4f7e-b603-9fec6030c732","Type":"ContainerStarted","Data":"dc2e3c897404f8bae8a29616703374c4eb6267a160e49be16512c074b1a043c8"} Oct 11 04:12:18 crc kubenswrapper[4798]: I1011 04:12:18.388729 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5744f55857-rnj42"] Oct 11 04:12:19 crc kubenswrapper[4798]: I1011 04:12:19.012884 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5744f55857-rnj42" event={"ID":"985c3452-9836-40f4-8b42-8082b04ffba3","Type":"ContainerStarted","Data":"bb7ad3880ebcefd8697f0ff0183becdf53b35faec4376ef52c4d0cee3476b2b5"} Oct 11 04:12:19 crc kubenswrapper[4798]: I1011 04:12:19.013500 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5744f55857-rnj42" event={"ID":"985c3452-9836-40f4-8b42-8082b04ffba3","Type":"ContainerStarted","Data":"b7dc96b48f97c58270ce89a8a77d50ee6d07a318d9220f516c6a250a23456517"} Oct 11 04:12:19 crc kubenswrapper[4798]: I1011 04:12:19.013532 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5744f55857-rnj42" Oct 11 04:12:19 crc kubenswrapper[4798]: I1011 04:12:19.013546 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5744f55857-rnj42" event={"ID":"985c3452-9836-40f4-8b42-8082b04ffba3","Type":"ContainerStarted","Data":"9f063624663fe38972857a913efbb0577468460a45c8bfb0f3aefd1530b9a7a6"} Oct 11 04:12:19 crc kubenswrapper[4798]: I1011 04:12:19.016994 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b454f77b-5618-4f7e-b603-9fec6030c732","Type":"ContainerStarted","Data":"8d2e2c5ff0490a804ed10fd2654a10f5af66c4fe4309b69cead35a62ba985d21"} Oct 11 04:12:19 crc kubenswrapper[4798]: I1011 04:12:19.017464 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Oct 11 04:12:19 crc kubenswrapper[4798]: I1011 04:12:19.063616 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.063591278 podStartE2EDuration="4.063591278s" podCreationTimestamp="2025-10-11 04:12:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:12:19.061526849 +0000 UTC m=+1034.397816535" watchObservedRunningTime="2025-10-11 04:12:19.063591278 +0000 UTC m=+1034.399880974" Oct 11 04:12:19 crc kubenswrapper[4798]: I1011 04:12:19.067736 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5744f55857-rnj42" podStartSLOduration=2.067723137 podStartE2EDuration="2.067723137s" podCreationTimestamp="2025-10-11 04:12:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:12:19.043735544 +0000 UTC m=+1034.380025220" watchObservedRunningTime="2025-10-11 04:12:19.067723137 +0000 UTC m=+1034.404012843" Oct 11 04:12:21 crc kubenswrapper[4798]: 
Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.195658 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.196836 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.200875 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.200938 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-5hl4p"
Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.201189 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret"
Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.214464 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.284212 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ecd7207-e032-4433-9077-b023a4c891f3-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0ecd7207-e032-4433-9077-b023a4c891f3\") " pod="openstack/openstackclient"
Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.284305 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0ecd7207-e032-4433-9077-b023a4c891f3-openstack-config-secret\") pod \"openstackclient\" (UID: \"0ecd7207-e032-4433-9077-b023a4c891f3\") " pod="openstack/openstackclient"
Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.284430 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0ecd7207-e032-4433-9077-b023a4c891f3-openstack-config\") pod \"openstackclient\" (UID: \"0ecd7207-e032-4433-9077-b023a4c891f3\") " pod="openstack/openstackclient"
Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.284502 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fww5\" (UniqueName: \"kubernetes.io/projected/0ecd7207-e032-4433-9077-b023a4c891f3-kube-api-access-6fww5\") pod \"openstackclient\" (UID: \"0ecd7207-e032-4433-9077-b023a4c891f3\") " pod="openstack/openstackclient"
Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.385972 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0ecd7207-e032-4433-9077-b023a4c891f3-openstack-config\") pod \"openstackclient\" (UID: \"0ecd7207-e032-4433-9077-b023a4c891f3\") " pod="openstack/openstackclient"
Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.386043 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fww5\" (UniqueName: \"kubernetes.io/projected/0ecd7207-e032-4433-9077-b023a4c891f3-kube-api-access-6fww5\") pod \"openstackclient\" (UID: \"0ecd7207-e032-4433-9077-b023a4c891f3\") " pod="openstack/openstackclient"
\"0ecd7207-e032-4433-9077-b023a4c891f3\") " pod="openstack/openstackclient" Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.386162 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0ecd7207-e032-4433-9077-b023a4c891f3-openstack-config-secret\") pod \"openstackclient\" (UID: \"0ecd7207-e032-4433-9077-b023a4c891f3\") " pod="openstack/openstackclient" Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.388102 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0ecd7207-e032-4433-9077-b023a4c891f3-openstack-config\") pod \"openstackclient\" (UID: \"0ecd7207-e032-4433-9077-b023a4c891f3\") " pod="openstack/openstackclient" Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.398434 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0ecd7207-e032-4433-9077-b023a4c891f3-openstack-config-secret\") pod \"openstackclient\" (UID: \"0ecd7207-e032-4433-9077-b023a4c891f3\") " pod="openstack/openstackclient" Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.399490 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ecd7207-e032-4433-9077-b023a4c891f3-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0ecd7207-e032-4433-9077-b023a4c891f3\") " pod="openstack/openstackclient" Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.409252 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fww5\" (UniqueName: \"kubernetes.io/projected/0ecd7207-e032-4433-9077-b023a4c891f3-kube-api-access-6fww5\") pod \"openstackclient\" (UID: \"0ecd7207-e032-4433-9077-b023a4c891f3\") " pod="openstack/openstackclient" Oct 11 04:12:21 crc kubenswrapper[4798]: I1011 04:12:21.530670 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 11 04:12:22 crc kubenswrapper[4798]: I1011 04:12:22.052165 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Oct 11 04:12:22 crc kubenswrapper[4798]: I1011 04:12:22.100060 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Oct 11 04:12:22 crc kubenswrapper[4798]: W1011 04:12:22.100749 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ecd7207_e032_4433_9077_b023a4c891f3.slice/crio-f5d0cb8290ede77a5d8298293db4bf69031fc90a85286a033fe56bcc5972d054 WatchSource:0}: Error finding container f5d0cb8290ede77a5d8298293db4bf69031fc90a85286a033fe56bcc5972d054: Status 404 returned error can't find the container with id f5d0cb8290ede77a5d8298293db4bf69031fc90a85286a033fe56bcc5972d054 Oct 11 04:12:22 crc kubenswrapper[4798]: I1011 04:12:22.128320 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 04:12:23 crc kubenswrapper[4798]: I1011 04:12:23.063502 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"0ecd7207-e032-4433-9077-b023a4c891f3","Type":"ContainerStarted","Data":"f5d0cb8290ede77a5d8298293db4bf69031fc90a85286a033fe56bcc5972d054"} Oct 11 04:12:23 crc kubenswrapper[4798]: I1011 04:12:23.063746 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="bc36d523-b6e0-4b0e-a50f-45a173f728b1" containerName="cinder-scheduler" containerID="cri-o://2539e9a6d5c9ddd24e80380f4158dca5f0fa866b3cb69f4606f662d1366fcce4" gracePeriod=30 Oct 11 04:12:23 crc kubenswrapper[4798]: I1011 04:12:23.064288 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="bc36d523-b6e0-4b0e-a50f-45a173f728b1" containerName="probe" containerID="cri-o://d623ac4cf836ee3767e600e66a3eb9468b636b4ddd98e67486ed4bba6dbac1f2" gracePeriod=30 Oct 11 04:12:24 crc kubenswrapper[4798]: I1011 04:12:24.388626 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:12:24 crc kubenswrapper[4798]: I1011 04:12:24.471733 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f46f79845-lc5wn"] Oct 11 04:12:24 crc kubenswrapper[4798]: I1011 04:12:24.472049 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" podUID="1a9dc4f3-49eb-45e4-973c-8556635d4b23" containerName="dnsmasq-dns" containerID="cri-o://e9911bfeaebd051b283f37780e60b4f31c60e7ae997869f1b9d02825da1a40a9" gracePeriod=10 Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.003761 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.060221 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-ovsdbserver-nb\") pod \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.060272 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-ovsdbserver-sb\") pod \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.060343 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kcrth\" (UniqueName: \"kubernetes.io/projected/1a9dc4f3-49eb-45e4-973c-8556635d4b23-kube-api-access-kcrth\") pod \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.060491 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-dns-svc\") pod \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.060555 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-config\") pod \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\" (UID: \"1a9dc4f3-49eb-45e4-973c-8556635d4b23\") " Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.078163 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a9dc4f3-49eb-45e4-973c-8556635d4b23-kube-api-access-kcrth" (OuterVolumeSpecName: "kube-api-access-kcrth") pod "1a9dc4f3-49eb-45e4-973c-8556635d4b23" (UID: "1a9dc4f3-49eb-45e4-973c-8556635d4b23"). InnerVolumeSpecName "kube-api-access-kcrth". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.096286 4798 generic.go:334] "Generic (PLEG): container finished" podID="1a9dc4f3-49eb-45e4-973c-8556635d4b23" containerID="e9911bfeaebd051b283f37780e60b4f31c60e7ae997869f1b9d02825da1a40a9" exitCode=0 Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.096359 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" event={"ID":"1a9dc4f3-49eb-45e4-973c-8556635d4b23","Type":"ContainerDied","Data":"e9911bfeaebd051b283f37780e60b4f31c60e7ae997869f1b9d02825da1a40a9"} Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.096406 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" event={"ID":"1a9dc4f3-49eb-45e4-973c-8556635d4b23","Type":"ContainerDied","Data":"cb6c28060d166bdf568e4d2b57b3902572460eecbaf5ea3942e220650fb801ca"} Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.096427 4798 scope.go:117] "RemoveContainer" containerID="e9911bfeaebd051b283f37780e60b4f31c60e7ae997869f1b9d02825da1a40a9" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.096568 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f46f79845-lc5wn" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.103717 4798 generic.go:334] "Generic (PLEG): container finished" podID="bc36d523-b6e0-4b0e-a50f-45a173f728b1" containerID="d623ac4cf836ee3767e600e66a3eb9468b636b4ddd98e67486ed4bba6dbac1f2" exitCode=0 Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.103810 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bc36d523-b6e0-4b0e-a50f-45a173f728b1","Type":"ContainerDied","Data":"d623ac4cf836ee3767e600e66a3eb9468b636b4ddd98e67486ed4bba6dbac1f2"} Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.109615 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1a9dc4f3-49eb-45e4-973c-8556635d4b23" (UID: "1a9dc4f3-49eb-45e4-973c-8556635d4b23"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.114466 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-config" (OuterVolumeSpecName: "config") pod "1a9dc4f3-49eb-45e4-973c-8556635d4b23" (UID: "1a9dc4f3-49eb-45e4-973c-8556635d4b23"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.122138 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1a9dc4f3-49eb-45e4-973c-8556635d4b23" (UID: "1a9dc4f3-49eb-45e4-973c-8556635d4b23"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.146155 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1a9dc4f3-49eb-45e4-973c-8556635d4b23" (UID: "1a9dc4f3-49eb-45e4-973c-8556635d4b23"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.163509 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.163547 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.163561 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kcrth\" (UniqueName: \"kubernetes.io/projected/1a9dc4f3-49eb-45e4-973c-8556635d4b23-kube-api-access-kcrth\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.163577 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.163588 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a9dc4f3-49eb-45e4-973c-8556635d4b23-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.210233 4798 scope.go:117] "RemoveContainer" containerID="eec7d039970114cae6055b3b75c68580d67680a90c0ec46a80b54f77bdfdd9ef" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.230769 4798 scope.go:117] "RemoveContainer" containerID="e9911bfeaebd051b283f37780e60b4f31c60e7ae997869f1b9d02825da1a40a9" Oct 11 04:12:25 crc kubenswrapper[4798]: E1011 04:12:25.233005 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9911bfeaebd051b283f37780e60b4f31c60e7ae997869f1b9d02825da1a40a9\": container with ID starting with e9911bfeaebd051b283f37780e60b4f31c60e7ae997869f1b9d02825da1a40a9 not found: ID does not exist" containerID="e9911bfeaebd051b283f37780e60b4f31c60e7ae997869f1b9d02825da1a40a9" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.233055 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9911bfeaebd051b283f37780e60b4f31c60e7ae997869f1b9d02825da1a40a9"} err="failed to get container status \"e9911bfeaebd051b283f37780e60b4f31c60e7ae997869f1b9d02825da1a40a9\": rpc error: code = NotFound desc = could not find container \"e9911bfeaebd051b283f37780e60b4f31c60e7ae997869f1b9d02825da1a40a9\": container with ID starting with e9911bfeaebd051b283f37780e60b4f31c60e7ae997869f1b9d02825da1a40a9 not found: ID does not exist" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.233086 4798 scope.go:117] "RemoveContainer" containerID="eec7d039970114cae6055b3b75c68580d67680a90c0ec46a80b54f77bdfdd9ef" Oct 11 04:12:25 crc kubenswrapper[4798]: E1011 04:12:25.234691 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eec7d039970114cae6055b3b75c68580d67680a90c0ec46a80b54f77bdfdd9ef\": container with ID starting with eec7d039970114cae6055b3b75c68580d67680a90c0ec46a80b54f77bdfdd9ef not found: ID does not exist" containerID="eec7d039970114cae6055b3b75c68580d67680a90c0ec46a80b54f77bdfdd9ef" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.234751 4798 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"eec7d039970114cae6055b3b75c68580d67680a90c0ec46a80b54f77bdfdd9ef"} err="failed to get container status \"eec7d039970114cae6055b3b75c68580d67680a90c0ec46a80b54f77bdfdd9ef\": rpc error: code = NotFound desc = could not find container \"eec7d039970114cae6055b3b75c68580d67680a90c0ec46a80b54f77bdfdd9ef\": container with ID starting with eec7d039970114cae6055b3b75c68580d67680a90c0ec46a80b54f77bdfdd9ef not found: ID does not exist" Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.465599 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f46f79845-lc5wn"] Oct 11 04:12:25 crc kubenswrapper[4798]: I1011 04:12:25.496174 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7f46f79845-lc5wn"] Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.117961 4798 generic.go:334] "Generic (PLEG): container finished" podID="bc36d523-b6e0-4b0e-a50f-45a173f728b1" containerID="2539e9a6d5c9ddd24e80380f4158dca5f0fa866b3cb69f4606f662d1366fcce4" exitCode=0 Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.118141 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bc36d523-b6e0-4b0e-a50f-45a173f728b1","Type":"ContainerDied","Data":"2539e9a6d5c9ddd24e80380f4158dca5f0fa866b3cb69f4606f662d1366fcce4"} Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.494803 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.590472 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-config-data\") pod \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.590570 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-combined-ca-bundle\") pod \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.590635 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-config-data-custom\") pod \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.590704 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bc36d523-b6e0-4b0e-a50f-45a173f728b1-etc-machine-id\") pod \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.590773 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-scripts\") pod \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.590830 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2h6qq\" (UniqueName: 
\"kubernetes.io/projected/bc36d523-b6e0-4b0e-a50f-45a173f728b1-kube-api-access-2h6qq\") pod \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\" (UID: \"bc36d523-b6e0-4b0e-a50f-45a173f728b1\") " Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.590836 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc36d523-b6e0-4b0e-a50f-45a173f728b1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "bc36d523-b6e0-4b0e-a50f-45a173f728b1" (UID: "bc36d523-b6e0-4b0e-a50f-45a173f728b1"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.591949 4798 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bc36d523-b6e0-4b0e-a50f-45a173f728b1-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.600451 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "bc36d523-b6e0-4b0e-a50f-45a173f728b1" (UID: "bc36d523-b6e0-4b0e-a50f-45a173f728b1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.604681 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-scripts" (OuterVolumeSpecName: "scripts") pod "bc36d523-b6e0-4b0e-a50f-45a173f728b1" (UID: "bc36d523-b6e0-4b0e-a50f-45a173f728b1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.604722 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc36d523-b6e0-4b0e-a50f-45a173f728b1-kube-api-access-2h6qq" (OuterVolumeSpecName: "kube-api-access-2h6qq") pod "bc36d523-b6e0-4b0e-a50f-45a173f728b1" (UID: "bc36d523-b6e0-4b0e-a50f-45a173f728b1"). InnerVolumeSpecName "kube-api-access-2h6qq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.674586 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bc36d523-b6e0-4b0e-a50f-45a173f728b1" (UID: "bc36d523-b6e0-4b0e-a50f-45a173f728b1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.696123 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.696178 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2h6qq\" (UniqueName: \"kubernetes.io/projected/bc36d523-b6e0-4b0e-a50f-45a173f728b1-kube-api-access-2h6qq\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.696191 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.696200 4798 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.782461 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-config-data" (OuterVolumeSpecName: "config-data") pod "bc36d523-b6e0-4b0e-a50f-45a173f728b1" (UID: "bc36d523-b6e0-4b0e-a50f-45a173f728b1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:26 crc kubenswrapper[4798]: I1011 04:12:26.797837 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc36d523-b6e0-4b0e-a50f-45a173f728b1-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.131321 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bc36d523-b6e0-4b0e-a50f-45a173f728b1","Type":"ContainerDied","Data":"2031c713752bbc3430ed3c517f44efa7bae2dc893a9cd6f143938676fe5d5cce"} Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.131849 4798 scope.go:117] "RemoveContainer" containerID="d623ac4cf836ee3767e600e66a3eb9468b636b4ddd98e67486ed4bba6dbac1f2" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.131430 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.171840 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.189314 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.198011 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 04:12:27 crc kubenswrapper[4798]: E1011 04:12:27.199609 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a9dc4f3-49eb-45e4-973c-8556635d4b23" containerName="dnsmasq-dns" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.199645 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a9dc4f3-49eb-45e4-973c-8556635d4b23" containerName="dnsmasq-dns" Oct 11 04:12:27 crc kubenswrapper[4798]: E1011 04:12:27.199789 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc36d523-b6e0-4b0e-a50f-45a173f728b1" containerName="cinder-scheduler" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.199801 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc36d523-b6e0-4b0e-a50f-45a173f728b1" containerName="cinder-scheduler" Oct 11 04:12:27 crc kubenswrapper[4798]: E1011 04:12:27.199837 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a9dc4f3-49eb-45e4-973c-8556635d4b23" containerName="init" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.199847 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a9dc4f3-49eb-45e4-973c-8556635d4b23" containerName="init" Oct 11 04:12:27 crc kubenswrapper[4798]: E1011 04:12:27.199859 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc36d523-b6e0-4b0e-a50f-45a173f728b1" containerName="probe" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.199866 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc36d523-b6e0-4b0e-a50f-45a173f728b1" containerName="probe" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.200337 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc36d523-b6e0-4b0e-a50f-45a173f728b1" containerName="probe" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.200464 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc36d523-b6e0-4b0e-a50f-45a173f728b1" containerName="cinder-scheduler" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.200481 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a9dc4f3-49eb-45e4-973c-8556635d4b23" containerName="dnsmasq-dns" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.204665 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.209281 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.216597 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.308313 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b88dab9f-9394-41b9-a314-ce7e36e021d8-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.308362 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b88dab9f-9394-41b9-a314-ce7e36e021d8-config-data\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.308466 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b88dab9f-9394-41b9-a314-ce7e36e021d8-scripts\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.308514 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b88dab9f-9394-41b9-a314-ce7e36e021d8-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.308530 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ss9rr\" (UniqueName: \"kubernetes.io/projected/b88dab9f-9394-41b9-a314-ce7e36e021d8-kube-api-access-ss9rr\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.308554 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b88dab9f-9394-41b9-a314-ce7e36e021d8-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.412124 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b88dab9f-9394-41b9-a314-ce7e36e021d8-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.412203 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b88dab9f-9394-41b9-a314-ce7e36e021d8-config-data\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.412281 4798 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b88dab9f-9394-41b9-a314-ce7e36e021d8-scripts\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.412290 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b88dab9f-9394-41b9-a314-ce7e36e021d8-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.412363 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b88dab9f-9394-41b9-a314-ce7e36e021d8-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.412529 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ss9rr\" (UniqueName: \"kubernetes.io/projected/b88dab9f-9394-41b9-a314-ce7e36e021d8-kube-api-access-ss9rr\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.412563 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b88dab9f-9394-41b9-a314-ce7e36e021d8-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.417360 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b88dab9f-9394-41b9-a314-ce7e36e021d8-scripts\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.418348 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b88dab9f-9394-41b9-a314-ce7e36e021d8-config-data\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.421097 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b88dab9f-9394-41b9-a314-ce7e36e021d8-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.421564 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b88dab9f-9394-41b9-a314-ce7e36e021d8-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.436786 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ss9rr\" (UniqueName: \"kubernetes.io/projected/b88dab9f-9394-41b9-a314-ce7e36e021d8-kube-api-access-ss9rr\") pod \"cinder-scheduler-0\" (UID: \"b88dab9f-9394-41b9-a314-ce7e36e021d8\") " pod="openstack/cinder-scheduler-0" 
Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.451534 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a9dc4f3-49eb-45e4-973c-8556635d4b23" path="/var/lib/kubelet/pods/1a9dc4f3-49eb-45e4-973c-8556635d4b23/volumes"
Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.452770 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc36d523-b6e0-4b0e-a50f-45a173f728b1" path="/var/lib/kubelet/pods/bc36d523-b6e0-4b0e-a50f-45a173f728b1/volumes"
Oct 11 04:12:27 crc kubenswrapper[4798]: I1011 04:12:27.530813 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Oct 11 04:12:28 crc kubenswrapper[4798]: I1011 04:12:28.770773 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Oct 11 04:12:31 crc kubenswrapper[4798]: I1011 04:12:31.768268 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:12:31 crc kubenswrapper[4798]: I1011 04:12:31.768889 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="ceilometer-central-agent" containerID="cri-o://37b3342c810f4b45d07c05ba69dc2f6742bb593adfbe062d5979768c9f30129b" gracePeriod=30
Oct 11 04:12:31 crc kubenswrapper[4798]: I1011 04:12:31.769020 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="proxy-httpd" containerID="cri-o://0db8b307faf50ea0455a970691914b8ddb9749ba23ecdc8600559fc9d5367090" gracePeriod=30
Oct 11 04:12:31 crc kubenswrapper[4798]: I1011 04:12:31.769063 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="sg-core" containerID="cri-o://2c14c2de63d9fcb167f6decea44733514f16c80c8b09924307acc28744a7c953" gracePeriod=30
Oct 11 04:12:31 crc kubenswrapper[4798]: I1011 04:12:31.769097 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="ceilometer-notification-agent" containerID="cri-o://e8201f0bc7bc38e4ca139682da4e743f7d3b9187fcda29f1f26759b5d46e51eb" gracePeriod=30
Oct 11 04:12:31 crc kubenswrapper[4798]: I1011 04:12:31.776728 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Oct 11 04:12:31 crc kubenswrapper[4798]: E1011 04:12:31.919540 4798 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddbe7cb03_b84f_4e72_a995_8d364b9d53ea.slice/crio-2c14c2de63d9fcb167f6decea44733514f16c80c8b09924307acc28744a7c953.scope\": RecentStats: unable to find data in memory cache]"
Oct 11 04:12:32 crc kubenswrapper[4798]: I1011 04:12:32.231346 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dbe7cb03-b84f-4e72-a995-8d364b9d53ea","Type":"ContainerDied","Data":"0db8b307faf50ea0455a970691914b8ddb9749ba23ecdc8600559fc9d5367090"}
Oct 11 04:12:32 crc kubenswrapper[4798]: I1011 04:12:32.231284 4798 generic.go:334] "Generic (PLEG): container finished" podID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerID="0db8b307faf50ea0455a970691914b8ddb9749ba23ecdc8600559fc9d5367090" exitCode=0
Oct 11 04:12:32 crc kubenswrapper[4798]: I1011 04:12:32.232700 4798 generic.go:334] "Generic (PLEG): container finished" podID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerID="2c14c2de63d9fcb167f6decea44733514f16c80c8b09924307acc28744a7c953" exitCode=2
Oct 11 04:12:32 crc kubenswrapper[4798]: I1011 04:12:32.232723 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dbe7cb03-b84f-4e72-a995-8d364b9d53ea","Type":"ContainerDied","Data":"2c14c2de63d9fcb167f6decea44733514f16c80c8b09924307acc28744a7c953"}
Oct 11 04:12:32 crc kubenswrapper[4798]: I1011 04:12:32.252347 4798 scope.go:117] "RemoveContainer" containerID="2539e9a6d5c9ddd24e80380f4158dca5f0fa866b3cb69f4606f662d1366fcce4"
Oct 11 04:12:32 crc kubenswrapper[4798]: W1011 04:12:32.852704 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb88dab9f_9394_41b9_a314_ce7e36e021d8.slice/crio-b2f7b110efe37f18c5c3e78f6c7dd0c44c795af091953b71e5fd770b96aaacaa WatchSource:0}: Error finding container b2f7b110efe37f18c5c3e78f6c7dd0c44c795af091953b71e5fd770b96aaacaa: Status 404 returned error can't find the container with id b2f7b110efe37f18c5c3e78f6c7dd0c44c795af091953b71e5fd770b96aaacaa
Oct 11 04:12:32 crc kubenswrapper[4798]: I1011 04:12:32.857315 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Oct 11 04:12:33 crc kubenswrapper[4798]: I1011 04:12:33.270767 4798 generic.go:334] "Generic (PLEG): container finished" podID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerID="37b3342c810f4b45d07c05ba69dc2f6742bb593adfbe062d5979768c9f30129b" exitCode=0
Oct 11 04:12:33 crc kubenswrapper[4798]: I1011 04:12:33.271066 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dbe7cb03-b84f-4e72-a995-8d364b9d53ea","Type":"ContainerDied","Data":"37b3342c810f4b45d07c05ba69dc2f6742bb593adfbe062d5979768c9f30129b"}
Oct 11 04:12:33 crc kubenswrapper[4798]: I1011 04:12:33.279701 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"0ecd7207-e032-4433-9077-b023a4c891f3","Type":"ContainerStarted","Data":"fa0e9de1f02f61b1de738168ebaca7985cc83b9e6fce65add877f321889d84ac"}
Oct 11 04:12:33 crc kubenswrapper[4798]: I1011 04:12:33.288834 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b88dab9f-9394-41b9-a314-ce7e36e021d8","Type":"ContainerStarted","Data":"b2f7b110efe37f18c5c3e78f6c7dd0c44c795af091953b71e5fd770b96aaacaa"}
Oct 11 04:12:33 crc kubenswrapper[4798]: I1011 04:12:33.320759 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.080253331 podStartE2EDuration="12.320737717s" podCreationTimestamp="2025-10-11 04:12:21 +0000 UTC" firstStartedPulling="2025-10-11 04:12:22.105188855 +0000 UTC m=+1037.441478531" lastFinishedPulling="2025-10-11 04:12:32.345673231 +0000 UTC m=+1047.681962917" observedRunningTime="2025-10-11 04:12:33.319721113 +0000 UTC m=+1048.656010799" watchObservedRunningTime="2025-10-11 04:12:33.320737717 +0000 UTC m=+1048.657027403"
Oct 11 04:12:34 crc kubenswrapper[4798]: I1011 04:12:34.316028 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b88dab9f-9394-41b9-a314-ce7e36e021d8","Type":"ContainerStarted","Data":"2ceccbc470aec1a5f02adce95d199c550d4343bd661f4dfac4e1831f16318e24"}
Oct 11 04:12:34 crc kubenswrapper[4798]: I1011 04:12:34.349311 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=7.34928574 podStartE2EDuration="7.34928574s" podCreationTimestamp="2025-10-11 04:12:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:12:34.336632638 +0000 UTC m=+1049.672922324" watchObservedRunningTime="2025-10-11 04:12:34.34928574 +0000 UTC m=+1049.685575426"
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.106566 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.148:3000/\": dial tcp 10.217.0.148:3000: connect: connection refused"
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.197514 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.197834 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="6ff4b0aa-8ae0-4301-8bb1-6978cfa47207" containerName="kube-state-metrics" containerID="cri-o://1d277328fed024bbbb27bfe2c63f2502b59c27bfe433add673853cd48171b6ca" gracePeriod=30
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.372056 4798 generic.go:334] "Generic (PLEG): container finished" podID="6ff4b0aa-8ae0-4301-8bb1-6978cfa47207" containerID="1d277328fed024bbbb27bfe2c63f2502b59c27bfe433add673853cd48171b6ca" exitCode=2
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.372151 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"6ff4b0aa-8ae0-4301-8bb1-6978cfa47207","Type":"ContainerDied","Data":"1d277328fed024bbbb27bfe2c63f2502b59c27bfe433add673853cd48171b6ca"}
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.393596 4798 generic.go:334] "Generic (PLEG): container finished" podID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerID="e8201f0bc7bc38e4ca139682da4e743f7d3b9187fcda29f1f26759b5d46e51eb" exitCode=0
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.393683 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dbe7cb03-b84f-4e72-a995-8d364b9d53ea","Type":"ContainerDied","Data":"e8201f0bc7bc38e4ca139682da4e743f7d3b9187fcda29f1f26759b5d46e51eb"}
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.532298 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.709664 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.714778 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.857184 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-run-httpd\") pod \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") "
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.857299 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlwh2\" (UniqueName: \"kubernetes.io/projected/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-kube-api-access-mlwh2\") pod \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") "
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.857623 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-scripts\") pod \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") "
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.858130 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "dbe7cb03-b84f-4e72-a995-8d364b9d53ea" (UID: "dbe7cb03-b84f-4e72-a995-8d364b9d53ea"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.858184 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-log-httpd\") pod \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") "
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.858257 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-combined-ca-bundle\") pod \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") "
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.858299 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbqfg\" (UniqueName: \"kubernetes.io/projected/6ff4b0aa-8ae0-4301-8bb1-6978cfa47207-kube-api-access-pbqfg\") pod \"6ff4b0aa-8ae0-4301-8bb1-6978cfa47207\" (UID: \"6ff4b0aa-8ae0-4301-8bb1-6978cfa47207\") "
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.858336 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-config-data\") pod \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") "
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.858365 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-sg-core-conf-yaml\") pod \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\" (UID: \"dbe7cb03-b84f-4e72-a995-8d364b9d53ea\") "
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.858809 4798 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-run-httpd\") on node \"crc\" DevicePath \"\""
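The "Probe failed" entry above shows the readiness probe for proxy-httpd hitting http://10.217.0.148:3000/ and getting connection refused once the container begins shutting down. The actual probe definition is not part of this log; a hypothetical fragment of that shape, expressed here as a Python dict with every value assumed for illustration, would look like:

    # Hypothetical readiness-probe fragment; none of these values are taken
    # from the cluster's manifests, only the port is inferred from the log.
    readiness_probe = {
        "httpGet": {"path": "/", "port": 3000},
        "periodSeconds": 10,      # assumed
        "failureThreshold": 3,    # assumed
    }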
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.860306 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "dbe7cb03-b84f-4e72-a995-8d364b9d53ea" (UID: "dbe7cb03-b84f-4e72-a995-8d364b9d53ea"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.866512 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ff4b0aa-8ae0-4301-8bb1-6978cfa47207-kube-api-access-pbqfg" (OuterVolumeSpecName: "kube-api-access-pbqfg") pod "6ff4b0aa-8ae0-4301-8bb1-6978cfa47207" (UID: "6ff4b0aa-8ae0-4301-8bb1-6978cfa47207"). InnerVolumeSpecName "kube-api-access-pbqfg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.869509 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-scripts" (OuterVolumeSpecName: "scripts") pod "dbe7cb03-b84f-4e72-a995-8d364b9d53ea" (UID: "dbe7cb03-b84f-4e72-a995-8d364b9d53ea"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.869704 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-kube-api-access-mlwh2" (OuterVolumeSpecName: "kube-api-access-mlwh2") pod "dbe7cb03-b84f-4e72-a995-8d364b9d53ea" (UID: "dbe7cb03-b84f-4e72-a995-8d364b9d53ea"). InnerVolumeSpecName "kube-api-access-mlwh2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.888645 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "dbe7cb03-b84f-4e72-a995-8d364b9d53ea" (UID: "dbe7cb03-b84f-4e72-a995-8d364b9d53ea"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.945769 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dbe7cb03-b84f-4e72-a995-8d364b9d53ea" (UID: "dbe7cb03-b84f-4e72-a995-8d364b9d53ea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.961446 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlwh2\" (UniqueName: \"kubernetes.io/projected/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-kube-api-access-mlwh2\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.961493 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.961507 4798 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-log-httpd\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.961520 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.961534 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbqfg\" (UniqueName: \"kubernetes.io/projected/6ff4b0aa-8ae0-4301-8bb1-6978cfa47207-kube-api-access-pbqfg\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.961548 4798 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:37 crc kubenswrapper[4798]: I1011 04:12:37.963497 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-config-data" (OuterVolumeSpecName: "config-data") pod "dbe7cb03-b84f-4e72-a995-8d364b9d53ea" (UID: "dbe7cb03-b84f-4e72-a995-8d364b9d53ea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.064156 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbe7cb03-b84f-4e72-a995-8d364b9d53ea-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.415155 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"6ff4b0aa-8ae0-4301-8bb1-6978cfa47207","Type":"ContainerDied","Data":"e3c860f179de47cf2aa2aaee0b00ce3c6aa9de3a3cc91d5bbdb78390368fc4b0"}
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.415173 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.415479 4798 scope.go:117] "RemoveContainer" containerID="1d277328fed024bbbb27bfe2c63f2502b59c27bfe433add673853cd48171b6ca"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.419376 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dbe7cb03-b84f-4e72-a995-8d364b9d53ea","Type":"ContainerDied","Data":"7fd3d69349d9c24f3203595a2e3fcd997f21d1feb62881b16c6962635a9bdbc8"}
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.419542 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
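The reconciler entries above walk each torn-down volume through the same three steps: "UnmountVolume started", "UnmountVolume.TearDown succeeded", and finally "Volume detached". A rough cross-check that every started unmount also reached the detached state; the regexes and the kubelet.log path are assumptions modeled on the messages above:

    import re

    # The log text contains literal backslash-escaped quotes around volume
    # names, which these patterns match as-is.
    STARTED = re.compile(r'UnmountVolume started for volume \\"([^\\"]+)\\"')
    DETACHED = re.compile(r'Volume detached for volume \\"([^\\"]+)\\"')

    pending, detached = set(), set()
    with open("kubelet.log") as f:          # placeholder path
        for line in f:
            pending.update(m.group(1) for m in STARTED.finditer(line))
            detached.update(m.group(1) for m in DETACHED.finditer(line))
    print("unmounted but never detached:", pending - detached)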
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.444372 4798 scope.go:117] "RemoveContainer" containerID="0db8b307faf50ea0455a970691914b8ddb9749ba23ecdc8600559fc9d5367090"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.464739 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.478203 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.492827 4798 scope.go:117] "RemoveContainer" containerID="2c14c2de63d9fcb167f6decea44733514f16c80c8b09924307acc28744a7c953"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.509660 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.520570 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.529803 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:12:38 crc kubenswrapper[4798]: E1011 04:12:38.530284 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="proxy-httpd"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.530305 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="proxy-httpd"
Oct 11 04:12:38 crc kubenswrapper[4798]: E1011 04:12:38.530325 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="sg-core"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.530333 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="sg-core"
Oct 11 04:12:38 crc kubenswrapper[4798]: E1011 04:12:38.530373 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="ceilometer-central-agent"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.530383 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="ceilometer-central-agent"
Oct 11 04:12:38 crc kubenswrapper[4798]: E1011 04:12:38.530417 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ff4b0aa-8ae0-4301-8bb1-6978cfa47207" containerName="kube-state-metrics"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.530425 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ff4b0aa-8ae0-4301-8bb1-6978cfa47207" containerName="kube-state-metrics"
Oct 11 04:12:38 crc kubenswrapper[4798]: E1011 04:12:38.530438 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="ceilometer-notification-agent"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.530445 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="ceilometer-notification-agent"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.530781 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="sg-core"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.530806 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ff4b0aa-8ae0-4301-8bb1-6978cfa47207" containerName="kube-state-metrics"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.530817 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="ceilometer-notification-agent"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.530841 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="ceilometer-central-agent"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.530850 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" containerName="proxy-httpd"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.531697 4798 scope.go:117] "RemoveContainer" containerID="e8201f0bc7bc38e4ca139682da4e743f7d3b9187fcda29f1f26759b5d46e51eb"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.532803 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.539085 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.539629 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.539884 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.540004 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-896fm"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.540099 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.541284 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
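Once the old ceilometer-0 and kube-state-metrics-0 pods are removed, the cpu_manager and memory_manager entries above discard the per-container resource state the kubelet still held for them. A small sketch listing which (podUID, containerName) pairs were cleaned up, matching both "RemoveStaleState" message variants shown above; the pattern and the file path are assumptions:

    import re

    # Covers both "RemoveStaleState: removing container" (cpu_manager) and
    # "RemoveStaleState removing state" (memory_manager) as seen above.
    STALE = re.compile(
        r'RemoveStaleState[:\s].*?podUID="([^"]+)" containerName="([^"]+)"'
    )
    with open("kubelet.log") as f:          # placeholder path
        pairs = {m.groups() for line in f for m in STALE.finditer(line)}
    for uid, name in sorted(pairs):
        print(uid, name)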
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.549948 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.552019 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.558206 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.559074 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.577107 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0b5cce2-9d60-4726-9660-bac9df48774f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"a0b5cce2-9d60-4726-9660-bac9df48774f\") " pod="openstack/kube-state-metrics-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.577173 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.578824 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d835dc38-8ee2-4971-8e58-72fd231ba550-run-httpd\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.578928 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-scripts\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.579005 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.579076 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.579186 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d835dc38-8ee2-4971-8e58-72fd231ba550-log-httpd\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.579262 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0b5cce2-9d60-4726-9660-bac9df48774f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"a0b5cce2-9d60-4726-9660-bac9df48774f\") " pod="openstack/kube-state-metrics-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.579408 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-config-data\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.579515 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/a0b5cce2-9d60-4726-9660-bac9df48774f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"a0b5cce2-9d60-4726-9660-bac9df48774f\") " pod="openstack/kube-state-metrics-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.579655 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbgfn\" (UniqueName: \"kubernetes.io/projected/a0b5cce2-9d60-4726-9660-bac9df48774f-kube-api-access-tbgfn\") pod \"kube-state-metrics-0\" (UID: \"a0b5cce2-9d60-4726-9660-bac9df48774f\") " pod="openstack/kube-state-metrics-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.579773 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lg4mp\" (UniqueName: \"kubernetes.io/projected/d835dc38-8ee2-4971-8e58-72fd231ba550-kube-api-access-lg4mp\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.591447 4798 scope.go:117] "RemoveContainer" containerID="37b3342c810f4b45d07c05ba69dc2f6742bb593adfbe062d5979768c9f30129b"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.682228 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lg4mp\" (UniqueName: \"kubernetes.io/projected/d835dc38-8ee2-4971-8e58-72fd231ba550-kube-api-access-lg4mp\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.682348 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0b5cce2-9d60-4726-9660-bac9df48774f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"a0b5cce2-9d60-4726-9660-bac9df48774f\") " pod="openstack/kube-state-metrics-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.682440 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.682493 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d835dc38-8ee2-4971-8e58-72fd231ba550-run-httpd\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0"
Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.682520 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-scripts\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0"
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-scripts\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.682546 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.682568 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.682640 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d835dc38-8ee2-4971-8e58-72fd231ba550-log-httpd\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.682701 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0b5cce2-9d60-4726-9660-bac9df48774f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"a0b5cce2-9d60-4726-9660-bac9df48774f\") " pod="openstack/kube-state-metrics-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.682751 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-config-data\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.682809 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/a0b5cce2-9d60-4726-9660-bac9df48774f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"a0b5cce2-9d60-4726-9660-bac9df48774f\") " pod="openstack/kube-state-metrics-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.682913 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbgfn\" (UniqueName: \"kubernetes.io/projected/a0b5cce2-9d60-4726-9660-bac9df48774f-kube-api-access-tbgfn\") pod \"kube-state-metrics-0\" (UID: \"a0b5cce2-9d60-4726-9660-bac9df48774f\") " pod="openstack/kube-state-metrics-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.682945 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d835dc38-8ee2-4971-8e58-72fd231ba550-run-httpd\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.686223 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d835dc38-8ee2-4971-8e58-72fd231ba550-log-httpd\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: 
I1011 04:12:38.688195 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0b5cce2-9d60-4726-9660-bac9df48774f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"a0b5cce2-9d60-4726-9660-bac9df48774f\") " pod="openstack/kube-state-metrics-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.689489 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0b5cce2-9d60-4726-9660-bac9df48774f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"a0b5cce2-9d60-4726-9660-bac9df48774f\") " pod="openstack/kube-state-metrics-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.689564 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.689987 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/a0b5cce2-9d60-4726-9660-bac9df48774f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"a0b5cce2-9d60-4726-9660-bac9df48774f\") " pod="openstack/kube-state-metrics-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.690279 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.692166 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-scripts\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.692296 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.702186 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbgfn\" (UniqueName: \"kubernetes.io/projected/a0b5cce2-9d60-4726-9660-bac9df48774f-kube-api-access-tbgfn\") pod \"kube-state-metrics-0\" (UID: \"a0b5cce2-9d60-4726-9660-bac9df48774f\") " pod="openstack/kube-state-metrics-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.704407 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-config-data\") pod \"ceilometer-0\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.705708 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lg4mp\" (UniqueName: \"kubernetes.io/projected/d835dc38-8ee2-4971-8e58-72fd231ba550-kube-api-access-lg4mp\") pod \"ceilometer-0\" (UID: 
\"d835dc38-8ee2-4971-8e58-72fd231ba550\") " pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.859829 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:12:38 crc kubenswrapper[4798]: I1011 04:12:38.885438 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 11 04:12:39 crc kubenswrapper[4798]: I1011 04:12:39.359167 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:12:39 crc kubenswrapper[4798]: I1011 04:12:39.435187 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ff4b0aa-8ae0-4301-8bb1-6978cfa47207" path="/var/lib/kubelet/pods/6ff4b0aa-8ae0-4301-8bb1-6978cfa47207/volumes" Oct 11 04:12:39 crc kubenswrapper[4798]: I1011 04:12:39.436067 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbe7cb03-b84f-4e72-a995-8d364b9d53ea" path="/var/lib/kubelet/pods/dbe7cb03-b84f-4e72-a995-8d364b9d53ea/volumes" Oct 11 04:12:39 crc kubenswrapper[4798]: I1011 04:12:39.436765 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 04:12:39 crc kubenswrapper[4798]: I1011 04:12:39.436788 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d835dc38-8ee2-4971-8e58-72fd231ba550","Type":"ContainerStarted","Data":"e5f241504def991fdd7878ab726827af1bd97388bf74b2af91fefe43417a4f99"} Oct 11 04:12:39 crc kubenswrapper[4798]: W1011 04:12:39.443827 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda0b5cce2_9d60_4726_9660_bac9df48774f.slice/crio-add987f2c8a2b78a634182b847118b72d9a2985389e510621583278e5f75bd7f WatchSource:0}: Error finding container add987f2c8a2b78a634182b847118b72d9a2985389e510621583278e5f75bd7f: Status 404 returned error can't find the container with id add987f2c8a2b78a634182b847118b72d9a2985389e510621583278e5f75bd7f Oct 11 04:12:40 crc kubenswrapper[4798]: I1011 04:12:40.445060 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a0b5cce2-9d60-4726-9660-bac9df48774f","Type":"ContainerStarted","Data":"e610bb7b63624ec4c592117c01103b36b13aa1907fc62e34b9ce2e3eecd89d71"} Oct 11 04:12:40 crc kubenswrapper[4798]: I1011 04:12:40.445652 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Oct 11 04:12:40 crc kubenswrapper[4798]: I1011 04:12:40.445679 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a0b5cce2-9d60-4726-9660-bac9df48774f","Type":"ContainerStarted","Data":"add987f2c8a2b78a634182b847118b72d9a2985389e510621583278e5f75bd7f"} Oct 11 04:12:40 crc kubenswrapper[4798]: I1011 04:12:40.447777 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d835dc38-8ee2-4971-8e58-72fd231ba550","Type":"ContainerStarted","Data":"b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6"} Oct 11 04:12:40 crc kubenswrapper[4798]: I1011 04:12:40.465568 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.104335328 podStartE2EDuration="2.465543134s" podCreationTimestamp="2025-10-11 04:12:38 +0000 UTC" firstStartedPulling="2025-10-11 04:12:39.446753724 +0000 UTC m=+1054.783043410" lastFinishedPulling="2025-10-11 
04:12:39.80796153 +0000 UTC m=+1055.144251216" observedRunningTime="2025-10-11 04:12:40.461689552 +0000 UTC m=+1055.797979248" watchObservedRunningTime="2025-10-11 04:12:40.465543134 +0000 UTC m=+1055.801832840" Oct 11 04:12:41 crc kubenswrapper[4798]: I1011 04:12:41.458926 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d835dc38-8ee2-4971-8e58-72fd231ba550","Type":"ContainerStarted","Data":"a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571"} Oct 11 04:12:41 crc kubenswrapper[4798]: I1011 04:12:41.459363 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d835dc38-8ee2-4971-8e58-72fd231ba550","Type":"ContainerStarted","Data":"9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c"} Oct 11 04:12:42 crc kubenswrapper[4798]: I1011 04:12:42.785738 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Oct 11 04:12:43 crc kubenswrapper[4798]: I1011 04:12:43.495559 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d835dc38-8ee2-4971-8e58-72fd231ba550","Type":"ContainerStarted","Data":"9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825"} Oct 11 04:12:43 crc kubenswrapper[4798]: I1011 04:12:43.496196 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 11 04:12:43 crc kubenswrapper[4798]: I1011 04:12:43.528117 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.440887505 podStartE2EDuration="5.528094131s" podCreationTimestamp="2025-10-11 04:12:38 +0000 UTC" firstStartedPulling="2025-10-11 04:12:39.369878808 +0000 UTC m=+1054.706168494" lastFinishedPulling="2025-10-11 04:12:42.457085434 +0000 UTC m=+1057.793375120" observedRunningTime="2025-10-11 04:12:43.524062515 +0000 UTC m=+1058.860352201" watchObservedRunningTime="2025-10-11 04:12:43.528094131 +0000 UTC m=+1058.864383837" Oct 11 04:12:44 crc kubenswrapper[4798]: I1011 04:12:44.415965 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:45 crc kubenswrapper[4798]: I1011 04:12:45.942884 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-f42tc"] Oct 11 04:12:45 crc kubenswrapper[4798]: I1011 04:12:45.944538 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-f42tc" Oct 11 04:12:45 crc kubenswrapper[4798]: I1011 04:12:45.956638 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-f42tc"] Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.045754 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-8vgc6"] Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.047020 4798 util.go:30] "No sandbox for pod can be found. 
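In the pod_startup_latency_tracker entries above, podStartE2EDuration is simply the gap between podCreationTimestamp and watchObservedRunningTime; for kube-state-metrics-0 that is 04:12:38 to 04:12:40.465543134, i.e. the reported 2.465543134s. Reproducing the arithmetic (timestamps truncated to microseconds, since Python's datetime carries no nanoseconds):

    from datetime import datetime

    FMT = "%Y-%m-%d %H:%M:%S.%f %z"
    created = datetime.strptime("2025-10-11 04:12:38.000000 +0000", FMT)
    observed = datetime.strptime("2025-10-11 04:12:40.465543 +0000", FMT)
    # Prints 0:00:02.465543, matching podStartE2EDuration up to truncation.
    print(observed - created)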
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.056058 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-8vgc6"]
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.123850 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6q855\" (UniqueName: \"kubernetes.io/projected/3173f67b-5d6d-4e62-89e0-1e97f7fdc299-kube-api-access-6q855\") pod \"nova-api-db-create-f42tc\" (UID: \"3173f67b-5d6d-4e62-89e0-1e97f7fdc299\") " pod="openstack/nova-api-db-create-f42tc"
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.227606 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc857\" (UniqueName: \"kubernetes.io/projected/f4a180e5-98b6-4211-8e75-1107f4a3530b-kube-api-access-vc857\") pod \"nova-cell0-db-create-8vgc6\" (UID: \"f4a180e5-98b6-4211-8e75-1107f4a3530b\") " pod="openstack/nova-cell0-db-create-8vgc6"
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.228114 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6q855\" (UniqueName: \"kubernetes.io/projected/3173f67b-5d6d-4e62-89e0-1e97f7fdc299-kube-api-access-6q855\") pod \"nova-api-db-create-f42tc\" (UID: \"3173f67b-5d6d-4e62-89e0-1e97f7fdc299\") " pod="openstack/nova-api-db-create-f42tc"
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.249323 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-px8b4"]
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.250436 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-px8b4"
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.257932 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-px8b4"]
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.276050 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6q855\" (UniqueName: \"kubernetes.io/projected/3173f67b-5d6d-4e62-89e0-1e97f7fdc299-kube-api-access-6q855\") pod \"nova-api-db-create-f42tc\" (UID: \"3173f67b-5d6d-4e62-89e0-1e97f7fdc299\") " pod="openstack/nova-api-db-create-f42tc"
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.329782 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc857\" (UniqueName: \"kubernetes.io/projected/f4a180e5-98b6-4211-8e75-1107f4a3530b-kube-api-access-vc857\") pod \"nova-cell0-db-create-8vgc6\" (UID: \"f4a180e5-98b6-4211-8e75-1107f4a3530b\") " pod="openstack/nova-cell0-db-create-8vgc6"
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.344741 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc857\" (UniqueName: \"kubernetes.io/projected/f4a180e5-98b6-4211-8e75-1107f4a3530b-kube-api-access-vc857\") pod \"nova-cell0-db-create-8vgc6\" (UID: \"f4a180e5-98b6-4211-8e75-1107f4a3530b\") " pod="openstack/nova-cell0-db-create-8vgc6"
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.370697 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-8vgc6"
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.432191 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cj5hl\" (UniqueName: \"kubernetes.io/projected/605e3f63-9cb1-4dc6-8455-ae0a7af9b164-kube-api-access-cj5hl\") pod \"nova-cell1-db-create-px8b4\" (UID: \"605e3f63-9cb1-4dc6-8455-ae0a7af9b164\") " pod="openstack/nova-cell1-db-create-px8b4"
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.535354 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cj5hl\" (UniqueName: \"kubernetes.io/projected/605e3f63-9cb1-4dc6-8455-ae0a7af9b164-kube-api-access-cj5hl\") pod \"nova-cell1-db-create-px8b4\" (UID: \"605e3f63-9cb1-4dc6-8455-ae0a7af9b164\") " pod="openstack/nova-cell1-db-create-px8b4"
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.572577 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cj5hl\" (UniqueName: \"kubernetes.io/projected/605e3f63-9cb1-4dc6-8455-ae0a7af9b164-kube-api-access-cj5hl\") pod \"nova-cell1-db-create-px8b4\" (UID: \"605e3f63-9cb1-4dc6-8455-ae0a7af9b164\") " pod="openstack/nova-cell1-db-create-px8b4"
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.574215 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-f42tc"
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.609045 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-px8b4"
Oct 11 04:12:46 crc kubenswrapper[4798]: I1011 04:12:46.876619 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-8vgc6"]
Oct 11 04:12:47 crc kubenswrapper[4798]: I1011 04:12:47.121818 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-f42tc"]
Oct 11 04:12:47 crc kubenswrapper[4798]: W1011 04:12:47.124940 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3173f67b_5d6d_4e62_89e0_1e97f7fdc299.slice/crio-9cfdf88b865469e5c76b24c97bcf83ebb57877487fdd19a25e06febf5b82e54a WatchSource:0}: Error finding container 9cfdf88b865469e5c76b24c97bcf83ebb57877487fdd19a25e06febf5b82e54a: Status 404 returned error can't find the container with id 9cfdf88b865469e5c76b24c97bcf83ebb57877487fdd19a25e06febf5b82e54a
Oct 11 04:12:47 crc kubenswrapper[4798]: I1011 04:12:47.178339 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-px8b4"]
Oct 11 04:12:47 crc kubenswrapper[4798]: W1011 04:12:47.190064 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod605e3f63_9cb1_4dc6_8455_ae0a7af9b164.slice/crio-d3046d27aa14dd91176437b826615d68af452317e621bc95bb498daf0ec06145 WatchSource:0}: Error finding container d3046d27aa14dd91176437b826615d68af452317e621bc95bb498daf0ec06145: Status 404 returned error can't find the container with id d3046d27aa14dd91176437b826615d68af452317e621bc95bb498daf0ec06145
Oct 11 04:12:47 crc kubenswrapper[4798]: I1011 04:12:47.534044 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-px8b4" event={"ID":"605e3f63-9cb1-4dc6-8455-ae0a7af9b164","Type":"ContainerStarted","Data":"d3046d27aa14dd91176437b826615d68af452317e621bc95bb498daf0ec06145"}
Oct 11 04:12:47 crc kubenswrapper[4798]: I1011 04:12:47.535347 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-f42tc" event={"ID":"3173f67b-5d6d-4e62-89e0-1e97f7fdc299","Type":"ContainerStarted","Data":"9cfdf88b865469e5c76b24c97bcf83ebb57877487fdd19a25e06febf5b82e54a"}
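Most of this log is informational (the leading I in I1011 ...), but the cadvisor watch-event failures above are W lines, and transient errors such as the earlier cadvisor_stats_provider failure are E lines. A quick filter for just the non-info entries; the regex and the file path are assumptions based on the klog prefix format seen throughout:

    import re

    # klog prefix: severity letter, MMDD, then hh:mm:ss.micros.
    SEV = re.compile(r'kubenswrapper\[\d+\]: ([IWE])\d{4} \d\d:\d\d:\d\d\.\d+')
    with open("kubelet.log") as f:          # placeholder path
        for line in f:
            m = SEV.search(line)
            if m and m.group(1) in "WE":
                print(line.rstrip())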
Oct 11 04:12:47 crc kubenswrapper[4798]: I1011 04:12:47.536840 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-8vgc6" event={"ID":"f4a180e5-98b6-4211-8e75-1107f4a3530b","Type":"ContainerStarted","Data":"3b3e75ccaaed6b30cbe0a6919c2c1207feeaac8033c2c83045f755eb42c7ecbb"}
Oct 11 04:12:47 crc kubenswrapper[4798]: I1011 04:12:47.773220 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5744f55857-rnj42"
Oct 11 04:12:47 crc kubenswrapper[4798]: I1011 04:12:47.841725 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-766995c844-7nlzq"]
Oct 11 04:12:47 crc kubenswrapper[4798]: I1011 04:12:47.841971 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-766995c844-7nlzq" podUID="eb2fdf9e-64f1-40bb-b964-73bbe357085c" containerName="neutron-api" containerID="cri-o://30f23467c0fe08eea3dda771f49a3e9eba8c083b26b7234b4d5b9e602e1c2d93" gracePeriod=30
Oct 11 04:12:47 crc kubenswrapper[4798]: I1011 04:12:47.842519 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-766995c844-7nlzq" podUID="eb2fdf9e-64f1-40bb-b964-73bbe357085c" containerName="neutron-httpd" containerID="cri-o://566ced5a5f4c94b8d17adf7b2ff3d1968e3e9d6042c791cf6d12e5ca23768760" gracePeriod=30
Oct 11 04:12:48 crc kubenswrapper[4798]: I1011 04:12:48.551230 4798 generic.go:334] "Generic (PLEG): container finished" podID="3173f67b-5d6d-4e62-89e0-1e97f7fdc299" containerID="4bb45b48c7f376b270a80f3af76073b1e89305955e9f4c24ea09f34f9c84e69b" exitCode=0
Oct 11 04:12:48 crc kubenswrapper[4798]: I1011 04:12:48.551363 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-f42tc" event={"ID":"3173f67b-5d6d-4e62-89e0-1e97f7fdc299","Type":"ContainerDied","Data":"4bb45b48c7f376b270a80f3af76073b1e89305955e9f4c24ea09f34f9c84e69b"}
Oct 11 04:12:48 crc kubenswrapper[4798]: I1011 04:12:48.554342 4798 generic.go:334] "Generic (PLEG): container finished" podID="f4a180e5-98b6-4211-8e75-1107f4a3530b" containerID="a353e9b02444c11ce479d86c7230c69f097d8eeac9fcf6abaec7ab52a3a871d6" exitCode=0
Oct 11 04:12:48 crc kubenswrapper[4798]: I1011 04:12:48.554570 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-8vgc6" event={"ID":"f4a180e5-98b6-4211-8e75-1107f4a3530b","Type":"ContainerDied","Data":"a353e9b02444c11ce479d86c7230c69f097d8eeac9fcf6abaec7ab52a3a871d6"}
Oct 11 04:12:48 crc kubenswrapper[4798]: I1011 04:12:48.556309 4798 generic.go:334] "Generic (PLEG): container finished" podID="605e3f63-9cb1-4dc6-8455-ae0a7af9b164" containerID="af1db6e406e7a9fd9bc4f359dbf713d537270527d53e1b4c5ced64cc7e242abd" exitCode=0
Oct 11 04:12:48 crc kubenswrapper[4798]: I1011 04:12:48.556532 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-px8b4" event={"ID":"605e3f63-9cb1-4dc6-8455-ae0a7af9b164","Type":"ContainerDied","Data":"af1db6e406e7a9fd9bc4f359dbf713d537270527d53e1b4c5ced64cc7e242abd"}
Oct 11 04:12:48 crc kubenswrapper[4798]: I1011 04:12:48.558300 4798 generic.go:334] "Generic (PLEG): container finished" podID="eb2fdf9e-64f1-40bb-b964-73bbe357085c" containerID="566ced5a5f4c94b8d17adf7b2ff3d1968e3e9d6042c791cf6d12e5ca23768760" exitCode=0
Oct 11 04:12:48 crc kubenswrapper[4798]: I1011 04:12:48.558333 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-766995c844-7nlzq" event={"ID":"eb2fdf9e-64f1-40bb-b964-73bbe357085c","Type":"ContainerDied","Data":"566ced5a5f4c94b8d17adf7b2ff3d1968e3e9d6042c791cf6d12e5ca23768760"}
Oct 11 04:12:48 crc kubenswrapper[4798]: I1011 04:12:48.902351 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Oct 11 04:12:49 crc kubenswrapper[4798]: I1011 04:12:49.302064 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:12:49 crc kubenswrapper[4798]: I1011 04:12:49.302370 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="ceilometer-central-agent" containerID="cri-o://b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6" gracePeriod=30
Oct 11 04:12:49 crc kubenswrapper[4798]: I1011 04:12:49.302492 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="proxy-httpd" containerID="cri-o://9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825" gracePeriod=30
Oct 11 04:12:49 crc kubenswrapper[4798]: I1011 04:12:49.302540 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="sg-core" containerID="cri-o://a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571" gracePeriod=30
Oct 11 04:12:49 crc kubenswrapper[4798]: I1011 04:12:49.302621 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="ceilometer-notification-agent" containerID="cri-o://9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c" gracePeriod=30
Oct 11 04:12:49 crc kubenswrapper[4798]: I1011 04:12:49.573859 4798 generic.go:334] "Generic (PLEG): container finished" podID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerID="9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825" exitCode=0
Oct 11 04:12:49 crc kubenswrapper[4798]: I1011 04:12:49.573893 4798 generic.go:334] "Generic (PLEG): container finished" podID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerID="a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571" exitCode=2
Oct 11 04:12:49 crc kubenswrapper[4798]: I1011 04:12:49.574089 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d835dc38-8ee2-4971-8e58-72fd231ba550","Type":"ContainerDied","Data":"9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825"}
Oct 11 04:12:49 crc kubenswrapper[4798]: I1011 04:12:49.574117 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d835dc38-8ee2-4971-8e58-72fd231ba550","Type":"ContainerDied","Data":"a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571"}
Oct 11 04:12:49 crc kubenswrapper[4798]: I1011 04:12:49.992673 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-f42tc"
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.001551 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-px8b4"
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.008226 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-8vgc6"
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.126473 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cj5hl\" (UniqueName: \"kubernetes.io/projected/605e3f63-9cb1-4dc6-8455-ae0a7af9b164-kube-api-access-cj5hl\") pod \"605e3f63-9cb1-4dc6-8455-ae0a7af9b164\" (UID: \"605e3f63-9cb1-4dc6-8455-ae0a7af9b164\") "
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.126538 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vc857\" (UniqueName: \"kubernetes.io/projected/f4a180e5-98b6-4211-8e75-1107f4a3530b-kube-api-access-vc857\") pod \"f4a180e5-98b6-4211-8e75-1107f4a3530b\" (UID: \"f4a180e5-98b6-4211-8e75-1107f4a3530b\") "
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.126702 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6q855\" (UniqueName: \"kubernetes.io/projected/3173f67b-5d6d-4e62-89e0-1e97f7fdc299-kube-api-access-6q855\") pod \"3173f67b-5d6d-4e62-89e0-1e97f7fdc299\" (UID: \"3173f67b-5d6d-4e62-89e0-1e97f7fdc299\") "
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.134339 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/605e3f63-9cb1-4dc6-8455-ae0a7af9b164-kube-api-access-cj5hl" (OuterVolumeSpecName: "kube-api-access-cj5hl") pod "605e3f63-9cb1-4dc6-8455-ae0a7af9b164" (UID: "605e3f63-9cb1-4dc6-8455-ae0a7af9b164"). InnerVolumeSpecName "kube-api-access-cj5hl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.135130 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3173f67b-5d6d-4e62-89e0-1e97f7fdc299-kube-api-access-6q855" (OuterVolumeSpecName: "kube-api-access-6q855") pod "3173f67b-5d6d-4e62-89e0-1e97f7fdc299" (UID: "3173f67b-5d6d-4e62-89e0-1e97f7fdc299"). InnerVolumeSpecName "kube-api-access-6q855". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.139149 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4a180e5-98b6-4211-8e75-1107f4a3530b-kube-api-access-vc857" (OuterVolumeSpecName: "kube-api-access-vc857") pod "f4a180e5-98b6-4211-8e75-1107f4a3530b" (UID: "f4a180e5-98b6-4211-8e75-1107f4a3530b"). InnerVolumeSpecName "kube-api-access-vc857". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.228730 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6q855\" (UniqueName: \"kubernetes.io/projected/3173f67b-5d6d-4e62-89e0-1e97f7fdc299-kube-api-access-6q855\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.228771 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cj5hl\" (UniqueName: \"kubernetes.io/projected/605e3f63-9cb1-4dc6-8455-ae0a7af9b164-kube-api-access-cj5hl\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.228781 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vc857\" (UniqueName: \"kubernetes.io/projected/f4a180e5-98b6-4211-8e75-1107f4a3530b-kube-api-access-vc857\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.573832 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.584513 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-8vgc6" event={"ID":"f4a180e5-98b6-4211-8e75-1107f4a3530b","Type":"ContainerDied","Data":"3b3e75ccaaed6b30cbe0a6919c2c1207feeaac8033c2c83045f755eb42c7ecbb"}
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.584566 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b3e75ccaaed6b30cbe0a6919c2c1207feeaac8033c2c83045f755eb42c7ecbb"
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.584707 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-8vgc6"
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.586102 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-px8b4" event={"ID":"605e3f63-9cb1-4dc6-8455-ae0a7af9b164","Type":"ContainerDied","Data":"d3046d27aa14dd91176437b826615d68af452317e621bc95bb498daf0ec06145"}
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.586124 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d3046d27aa14dd91176437b826615d68af452317e621bc95bb498daf0ec06145"
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.586206 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-px8b4"
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.591766 4798 generic.go:334] "Generic (PLEG): container finished" podID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerID="9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c" exitCode=0
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.591819 4798 generic.go:334] "Generic (PLEG): container finished" podID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerID="b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6" exitCode=0
Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.591994 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.592243 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d835dc38-8ee2-4971-8e58-72fd231ba550","Type":"ContainerDied","Data":"9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c"} Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.592294 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d835dc38-8ee2-4971-8e58-72fd231ba550","Type":"ContainerDied","Data":"b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6"} Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.592307 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d835dc38-8ee2-4971-8e58-72fd231ba550","Type":"ContainerDied","Data":"e5f241504def991fdd7878ab726827af1bd97388bf74b2af91fefe43417a4f99"} Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.592329 4798 scope.go:117] "RemoveContainer" containerID="9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.595456 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-f42tc" event={"ID":"3173f67b-5d6d-4e62-89e0-1e97f7fdc299","Type":"ContainerDied","Data":"9cfdf88b865469e5c76b24c97bcf83ebb57877487fdd19a25e06febf5b82e54a"} Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.595489 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9cfdf88b865469e5c76b24c97bcf83ebb57877487fdd19a25e06febf5b82e54a" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.595550 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-f42tc" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.615807 4798 scope.go:117] "RemoveContainer" containerID="a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.635713 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d835dc38-8ee2-4971-8e58-72fd231ba550-log-httpd\") pod \"d835dc38-8ee2-4971-8e58-72fd231ba550\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.635765 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-ceilometer-tls-certs\") pod \"d835dc38-8ee2-4971-8e58-72fd231ba550\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.635798 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-config-data\") pod \"d835dc38-8ee2-4971-8e58-72fd231ba550\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.635906 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-sg-core-conf-yaml\") pod \"d835dc38-8ee2-4971-8e58-72fd231ba550\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.635995 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-combined-ca-bundle\") pod \"d835dc38-8ee2-4971-8e58-72fd231ba550\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.636026 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lg4mp\" (UniqueName: \"kubernetes.io/projected/d835dc38-8ee2-4971-8e58-72fd231ba550-kube-api-access-lg4mp\") pod \"d835dc38-8ee2-4971-8e58-72fd231ba550\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.636081 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-scripts\") pod \"d835dc38-8ee2-4971-8e58-72fd231ba550\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.636128 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d835dc38-8ee2-4971-8e58-72fd231ba550-run-httpd\") pod \"d835dc38-8ee2-4971-8e58-72fd231ba550\" (UID: \"d835dc38-8ee2-4971-8e58-72fd231ba550\") " Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.636177 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d835dc38-8ee2-4971-8e58-72fd231ba550-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d835dc38-8ee2-4971-8e58-72fd231ba550" (UID: "d835dc38-8ee2-4971-8e58-72fd231ba550"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.636514 4798 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d835dc38-8ee2-4971-8e58-72fd231ba550-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.636909 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d835dc38-8ee2-4971-8e58-72fd231ba550-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d835dc38-8ee2-4971-8e58-72fd231ba550" (UID: "d835dc38-8ee2-4971-8e58-72fd231ba550"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.637073 4798 scope.go:117] "RemoveContainer" containerID="9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.642037 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d835dc38-8ee2-4971-8e58-72fd231ba550-kube-api-access-lg4mp" (OuterVolumeSpecName: "kube-api-access-lg4mp") pod "d835dc38-8ee2-4971-8e58-72fd231ba550" (UID: "d835dc38-8ee2-4971-8e58-72fd231ba550"). InnerVolumeSpecName "kube-api-access-lg4mp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.655826 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-scripts" (OuterVolumeSpecName: "scripts") pod "d835dc38-8ee2-4971-8e58-72fd231ba550" (UID: "d835dc38-8ee2-4971-8e58-72fd231ba550"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.692093 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d835dc38-8ee2-4971-8e58-72fd231ba550" (UID: "d835dc38-8ee2-4971-8e58-72fd231ba550"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.711609 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d835dc38-8ee2-4971-8e58-72fd231ba550" (UID: "d835dc38-8ee2-4971-8e58-72fd231ba550"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.726381 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "d835dc38-8ee2-4971-8e58-72fd231ba550" (UID: "d835dc38-8ee2-4971-8e58-72fd231ba550"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.735381 4798 scope.go:117] "RemoveContainer" containerID="b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.738211 4798 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d835dc38-8ee2-4971-8e58-72fd231ba550-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.738292 4798 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.738383 4798 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.738488 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.738545 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lg4mp\" (UniqueName: \"kubernetes.io/projected/d835dc38-8ee2-4971-8e58-72fd231ba550-kube-api-access-lg4mp\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.738600 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.754521 4798 scope.go:117] "RemoveContainer" containerID="9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825" Oct 11 04:12:50 crc kubenswrapper[4798]: E1011 04:12:50.755202 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find 
container \"9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825\": container with ID starting with 9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825 not found: ID does not exist" containerID="9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.755257 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825"} err="failed to get container status \"9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825\": rpc error: code = NotFound desc = could not find container \"9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825\": container with ID starting with 9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825 not found: ID does not exist" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.755280 4798 scope.go:117] "RemoveContainer" containerID="a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.755278 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-config-data" (OuterVolumeSpecName: "config-data") pod "d835dc38-8ee2-4971-8e58-72fd231ba550" (UID: "d835dc38-8ee2-4971-8e58-72fd231ba550"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:50 crc kubenswrapper[4798]: E1011 04:12:50.755773 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571\": container with ID starting with a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571 not found: ID does not exist" containerID="a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.755828 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571"} err="failed to get container status \"a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571\": rpc error: code = NotFound desc = could not find container \"a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571\": container with ID starting with a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571 not found: ID does not exist" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.755848 4798 scope.go:117] "RemoveContainer" containerID="9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c" Oct 11 04:12:50 crc kubenswrapper[4798]: E1011 04:12:50.756161 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c\": container with ID starting with 9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c not found: ID does not exist" containerID="9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.756252 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c"} err="failed to get container status \"9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c\": rpc error: code = NotFound 
desc = could not find container \"9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c\": container with ID starting with 9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c not found: ID does not exist" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.756328 4798 scope.go:117] "RemoveContainer" containerID="b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6" Oct 11 04:12:50 crc kubenswrapper[4798]: E1011 04:12:50.757012 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6\": container with ID starting with b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6 not found: ID does not exist" containerID="b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.757067 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6"} err="failed to get container status \"b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6\": rpc error: code = NotFound desc = could not find container \"b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6\": container with ID starting with b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6 not found: ID does not exist" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.757093 4798 scope.go:117] "RemoveContainer" containerID="9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.757422 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825"} err="failed to get container status \"9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825\": rpc error: code = NotFound desc = could not find container \"9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825\": container with ID starting with 9da91ff0ef6bfebc0d08a297d8ea6f29c4c6b9d1a1186c7ae172c0e306686825 not found: ID does not exist" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.757456 4798 scope.go:117] "RemoveContainer" containerID="a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.757864 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571"} err="failed to get container status \"a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571\": rpc error: code = NotFound desc = could not find container \"a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571\": container with ID starting with a56c23419746c57ed9d5655cf57d49a686c4d4f8a0adedff9dfacddc82757571 not found: ID does not exist" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.757903 4798 scope.go:117] "RemoveContainer" containerID="9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.758288 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c"} err="failed to get container status \"9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c\": rpc error: code = NotFound 
desc = could not find container \"9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c\": container with ID starting with 9df641911d8fb862799d2d7e3b93e6fd12a931aea6b1c1581509dcb786929a1c not found: ID does not exist" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.758332 4798 scope.go:117] "RemoveContainer" containerID="b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.758726 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6"} err="failed to get container status \"b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6\": rpc error: code = NotFound desc = could not find container \"b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6\": container with ID starting with b9aa3ce438905af5d3a7cb7e5bb476b866aac72f7515a94b0f9fbd36c13d37a6 not found: ID does not exist" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.841027 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d835dc38-8ee2-4971-8e58-72fd231ba550-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.923240 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.929151 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.949914 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:12:50 crc kubenswrapper[4798]: E1011 04:12:50.950432 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4a180e5-98b6-4211-8e75-1107f4a3530b" containerName="mariadb-database-create" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950458 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4a180e5-98b6-4211-8e75-1107f4a3530b" containerName="mariadb-database-create" Oct 11 04:12:50 crc kubenswrapper[4798]: E1011 04:12:50.950471 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="sg-core" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950480 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="sg-core" Oct 11 04:12:50 crc kubenswrapper[4798]: E1011 04:12:50.950513 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="ceilometer-notification-agent" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950521 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="ceilometer-notification-agent" Oct 11 04:12:50 crc kubenswrapper[4798]: E1011 04:12:50.950531 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="605e3f63-9cb1-4dc6-8455-ae0a7af9b164" containerName="mariadb-database-create" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950538 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="605e3f63-9cb1-4dc6-8455-ae0a7af9b164" containerName="mariadb-database-create" Oct 11 04:12:50 crc kubenswrapper[4798]: E1011 04:12:50.950552 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3173f67b-5d6d-4e62-89e0-1e97f7fdc299" containerName="mariadb-database-create" Oct 11 
04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950561 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3173f67b-5d6d-4e62-89e0-1e97f7fdc299" containerName="mariadb-database-create" Oct 11 04:12:50 crc kubenswrapper[4798]: E1011 04:12:50.950576 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="ceilometer-central-agent" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950584 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="ceilometer-central-agent" Oct 11 04:12:50 crc kubenswrapper[4798]: E1011 04:12:50.950606 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="proxy-httpd" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950614 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="proxy-httpd" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950832 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="proxy-httpd" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950847 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3173f67b-5d6d-4e62-89e0-1e97f7fdc299" containerName="mariadb-database-create" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950864 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="605e3f63-9cb1-4dc6-8455-ae0a7af9b164" containerName="mariadb-database-create" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950879 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="ceilometer-central-agent" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950893 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="sg-core" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950905 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" containerName="ceilometer-notification-agent" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.950917 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4a180e5-98b6-4211-8e75-1107f4a3530b" containerName="mariadb-database-create" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.962151 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.965345 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.965427 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.965607 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 11 04:12:50 crc kubenswrapper[4798]: I1011 04:12:50.974967 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.045547 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-log-httpd\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.045606 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.045637 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-scripts\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.045875 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmw7w\" (UniqueName: \"kubernetes.io/projected/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-kube-api-access-lmw7w\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.046076 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.046127 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.046174 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-run-httpd\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.046243 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-config-data\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.148372 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmw7w\" (UniqueName: \"kubernetes.io/projected/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-kube-api-access-lmw7w\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.148468 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.148494 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.148516 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-run-httpd\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.148547 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-config-data\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.148571 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-log-httpd\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.148596 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.148617 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-scripts\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.151195 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-log-httpd\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.151482 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-run-httpd\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.152444 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-scripts\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.153053 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-config-data\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.153737 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.153743 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.154881 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.163834 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmw7w\" (UniqueName: \"kubernetes.io/projected/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-kube-api-access-lmw7w\") pod \"ceilometer-0\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.286380 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.438432 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d835dc38-8ee2-4971-8e58-72fd231ba550" path="/var/lib/kubelet/pods/d835dc38-8ee2-4971-8e58-72fd231ba550/volumes" Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.608859 4798 generic.go:334] "Generic (PLEG): container finished" podID="eb2fdf9e-64f1-40bb-b964-73bbe357085c" containerID="30f23467c0fe08eea3dda771f49a3e9eba8c083b26b7234b4d5b9e602e1c2d93" exitCode=0 Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.608929 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-766995c844-7nlzq" event={"ID":"eb2fdf9e-64f1-40bb-b964-73bbe357085c","Type":"ContainerDied","Data":"30f23467c0fe08eea3dda771f49a3e9eba8c083b26b7234b4d5b9e602e1c2d93"} Oct 11 04:12:51 crc kubenswrapper[4798]: W1011 04:12:51.784313 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode3e55d17_f35e_4c87_b08a_8f6d8800ebf3.slice/crio-6aea42b4e5522cd3b344f71a3dac070b7d03945103dafc992c824ba84468681d WatchSource:0}: Error finding container 6aea42b4e5522cd3b344f71a3dac070b7d03945103dafc992c824ba84468681d: Status 404 returned error can't find the container with id 6aea42b4e5522cd3b344f71a3dac070b7d03945103dafc992c824ba84468681d Oct 11 04:12:51 crc kubenswrapper[4798]: I1011 04:12:51.789928 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.086810 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.178128 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-combined-ca-bundle\") pod \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.178241 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpth7\" (UniqueName: \"kubernetes.io/projected/eb2fdf9e-64f1-40bb-b964-73bbe357085c-kube-api-access-xpth7\") pod \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.178326 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-config\") pod \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.178378 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-ovndb-tls-certs\") pod \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.178452 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-httpd-config\") pod \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\" (UID: \"eb2fdf9e-64f1-40bb-b964-73bbe357085c\") " Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 
04:12:52.184558 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb2fdf9e-64f1-40bb-b964-73bbe357085c-kube-api-access-xpth7" (OuterVolumeSpecName: "kube-api-access-xpth7") pod "eb2fdf9e-64f1-40bb-b964-73bbe357085c" (UID: "eb2fdf9e-64f1-40bb-b964-73bbe357085c"). InnerVolumeSpecName "kube-api-access-xpth7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.185114 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "eb2fdf9e-64f1-40bb-b964-73bbe357085c" (UID: "eb2fdf9e-64f1-40bb-b964-73bbe357085c"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.235801 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-config" (OuterVolumeSpecName: "config") pod "eb2fdf9e-64f1-40bb-b964-73bbe357085c" (UID: "eb2fdf9e-64f1-40bb-b964-73bbe357085c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.241560 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eb2fdf9e-64f1-40bb-b964-73bbe357085c" (UID: "eb2fdf9e-64f1-40bb-b964-73bbe357085c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.254374 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "eb2fdf9e-64f1-40bb-b964-73bbe357085c" (UID: "eb2fdf9e-64f1-40bb-b964-73bbe357085c"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.280228 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.280260 4798 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.280300 4798 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-httpd-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.280312 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb2fdf9e-64f1-40bb-b964-73bbe357085c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.280321 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpth7\" (UniqueName: \"kubernetes.io/projected/eb2fdf9e-64f1-40bb-b964-73bbe357085c-kube-api-access-xpth7\") on node \"crc\" DevicePath \"\"" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.621852 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-766995c844-7nlzq" event={"ID":"eb2fdf9e-64f1-40bb-b964-73bbe357085c","Type":"ContainerDied","Data":"0e43d9448071d502c32ec0f573e0ba7c5d257342a98b682d7a15d841faaf5574"} Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.622319 4798 scope.go:117] "RemoveContainer" containerID="566ced5a5f4c94b8d17adf7b2ff3d1968e3e9d6042c791cf6d12e5ca23768760" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.621930 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-766995c844-7nlzq" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.626236 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3","Type":"ContainerStarted","Data":"05a92666dffc19e59f07cda0613c586e8e42483e7d638360864f45f8c20b70d7"} Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.626298 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3","Type":"ContainerStarted","Data":"6aea42b4e5522cd3b344f71a3dac070b7d03945103dafc992c824ba84468681d"} Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.665264 4798 scope.go:117] "RemoveContainer" containerID="30f23467c0fe08eea3dda771f49a3e9eba8c083b26b7234b4d5b9e602e1c2d93" Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.669291 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-766995c844-7nlzq"] Oct 11 04:12:52 crc kubenswrapper[4798]: I1011 04:12:52.681812 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-766995c844-7nlzq"] Oct 11 04:12:53 crc kubenswrapper[4798]: I1011 04:12:53.435611 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb2fdf9e-64f1-40bb-b964-73bbe357085c" path="/var/lib/kubelet/pods/eb2fdf9e-64f1-40bb-b964-73bbe357085c/volumes" Oct 11 04:12:53 crc kubenswrapper[4798]: I1011 04:12:53.638600 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3","Type":"ContainerStarted","Data":"1a418281aa55263c9d1b564e523ae852ba08bf62b520dd2f7bb70c1feff69d0e"} Oct 11 04:12:54 crc kubenswrapper[4798]: I1011 04:12:54.649404 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3","Type":"ContainerStarted","Data":"61f1aeb503d37741b7ebb14ead5e6ee4ea5b1a0593d9edafa9fb78f36e468b4b"} Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.193990 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-7c3d-account-create-f9s8l"] Oct 11 04:12:56 crc kubenswrapper[4798]: E1011 04:12:56.196545 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb2fdf9e-64f1-40bb-b964-73bbe357085c" containerName="neutron-httpd" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.196765 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb2fdf9e-64f1-40bb-b964-73bbe357085c" containerName="neutron-httpd" Oct 11 04:12:56 crc kubenswrapper[4798]: E1011 04:12:56.196895 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb2fdf9e-64f1-40bb-b964-73bbe357085c" containerName="neutron-api" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.196963 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb2fdf9e-64f1-40bb-b964-73bbe357085c" containerName="neutron-api" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.197255 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb2fdf9e-64f1-40bb-b964-73bbe357085c" containerName="neutron-httpd" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.197344 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb2fdf9e-64f1-40bb-b964-73bbe357085c" containerName="neutron-api" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.198216 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-7c3d-account-create-f9s8l" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.209800 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.226662 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7c3d-account-create-f9s8l"] Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.270841 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndmlw\" (UniqueName: \"kubernetes.io/projected/680c8968-661e-40dd-b095-190bab5e1fac-kube-api-access-ndmlw\") pod \"nova-api-7c3d-account-create-f9s8l\" (UID: \"680c8968-661e-40dd-b095-190bab5e1fac\") " pod="openstack/nova-api-7c3d-account-create-f9s8l" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.372655 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndmlw\" (UniqueName: \"kubernetes.io/projected/680c8968-661e-40dd-b095-190bab5e1fac-kube-api-access-ndmlw\") pod \"nova-api-7c3d-account-create-f9s8l\" (UID: \"680c8968-661e-40dd-b095-190bab5e1fac\") " pod="openstack/nova-api-7c3d-account-create-f9s8l" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.373489 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-504f-account-create-2dqzs"] Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.375013 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-504f-account-create-2dqzs" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.377601 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.387086 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-504f-account-create-2dqzs"] Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.406311 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndmlw\" (UniqueName: \"kubernetes.io/projected/680c8968-661e-40dd-b095-190bab5e1fac-kube-api-access-ndmlw\") pod \"nova-api-7c3d-account-create-f9s8l\" (UID: \"680c8968-661e-40dd-b095-190bab5e1fac\") " pod="openstack/nova-api-7c3d-account-create-f9s8l" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.474485 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mw8nx\" (UniqueName: \"kubernetes.io/projected/603093d3-dcc2-4ee2-be29-de2505f5c238-kube-api-access-mw8nx\") pod \"nova-cell0-504f-account-create-2dqzs\" (UID: \"603093d3-dcc2-4ee2-be29-de2505f5c238\") " pod="openstack/nova-cell0-504f-account-create-2dqzs" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.526940 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-0bfb-account-create-7nxnt"] Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.528203 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0bfb-account-create-7nxnt" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.530822 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-7c3d-account-create-f9s8l" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.536075 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-0bfb-account-create-7nxnt"] Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.550522 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.576181 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mw8nx\" (UniqueName: \"kubernetes.io/projected/603093d3-dcc2-4ee2-be29-de2505f5c238-kube-api-access-mw8nx\") pod \"nova-cell0-504f-account-create-2dqzs\" (UID: \"603093d3-dcc2-4ee2-be29-de2505f5c238\") " pod="openstack/nova-cell0-504f-account-create-2dqzs" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.599368 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mw8nx\" (UniqueName: \"kubernetes.io/projected/603093d3-dcc2-4ee2-be29-de2505f5c238-kube-api-access-mw8nx\") pod \"nova-cell0-504f-account-create-2dqzs\" (UID: \"603093d3-dcc2-4ee2-be29-de2505f5c238\") " pod="openstack/nova-cell0-504f-account-create-2dqzs" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.681541 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5fsm\" (UniqueName: \"kubernetes.io/projected/2ea903c7-d1f2-455c-80ca-e672df23712b-kube-api-access-j5fsm\") pod \"nova-cell1-0bfb-account-create-7nxnt\" (UID: \"2ea903c7-d1f2-455c-80ca-e672df23712b\") " pod="openstack/nova-cell1-0bfb-account-create-7nxnt" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.685616 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3","Type":"ContainerStarted","Data":"64724848ea57443261bfed5ee339b35f38906fbc635050e9786f2e97714eedb7"} Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.686146 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.696382 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-504f-account-create-2dqzs" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.723888 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.036158516 podStartE2EDuration="6.723867493s" podCreationTimestamp="2025-10-11 04:12:50 +0000 UTC" firstStartedPulling="2025-10-11 04:12:51.787609369 +0000 UTC m=+1067.123899055" lastFinishedPulling="2025-10-11 04:12:55.475318346 +0000 UTC m=+1070.811608032" observedRunningTime="2025-10-11 04:12:56.710815631 +0000 UTC m=+1072.047105317" watchObservedRunningTime="2025-10-11 04:12:56.723867493 +0000 UTC m=+1072.060157179" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.784168 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5fsm\" (UniqueName: \"kubernetes.io/projected/2ea903c7-d1f2-455c-80ca-e672df23712b-kube-api-access-j5fsm\") pod \"nova-cell1-0bfb-account-create-7nxnt\" (UID: \"2ea903c7-d1f2-455c-80ca-e672df23712b\") " pod="openstack/nova-cell1-0bfb-account-create-7nxnt" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.808002 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5fsm\" (UniqueName: \"kubernetes.io/projected/2ea903c7-d1f2-455c-80ca-e672df23712b-kube-api-access-j5fsm\") pod \"nova-cell1-0bfb-account-create-7nxnt\" (UID: \"2ea903c7-d1f2-455c-80ca-e672df23712b\") " pod="openstack/nova-cell1-0bfb-account-create-7nxnt" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.956983 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0bfb-account-create-7nxnt" Oct 11 04:12:56 crc kubenswrapper[4798]: I1011 04:12:56.996625 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7c3d-account-create-f9s8l"] Oct 11 04:12:57 crc kubenswrapper[4798]: I1011 04:12:57.143164 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:12:57 crc kubenswrapper[4798]: I1011 04:12:57.143219 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:12:57 crc kubenswrapper[4798]: I1011 04:12:57.204568 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-504f-account-create-2dqzs"] Oct 11 04:12:57 crc kubenswrapper[4798]: I1011 04:12:57.652935 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-0bfb-account-create-7nxnt"] Oct 11 04:12:57 crc kubenswrapper[4798]: W1011 04:12:57.667046 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ea903c7_d1f2_455c_80ca_e672df23712b.slice/crio-cff0e38c5a75049cef1a5de2c19d1861a49f8d9a080c21af862e3a81e86f77cc WatchSource:0}: Error finding container cff0e38c5a75049cef1a5de2c19d1861a49f8d9a080c21af862e3a81e86f77cc: Status 404 returned error can't find the container with id cff0e38c5a75049cef1a5de2c19d1861a49f8d9a080c21af862e3a81e86f77cc Oct 11 04:12:57 crc kubenswrapper[4798]: 
I1011 04:12:57.698091 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0bfb-account-create-7nxnt" event={"ID":"2ea903c7-d1f2-455c-80ca-e672df23712b","Type":"ContainerStarted","Data":"cff0e38c5a75049cef1a5de2c19d1861a49f8d9a080c21af862e3a81e86f77cc"}
Oct 11 04:12:57 crc kubenswrapper[4798]: I1011 04:12:57.700082 4798 generic.go:334] "Generic (PLEG): container finished" podID="680c8968-661e-40dd-b095-190bab5e1fac" containerID="1de8a781698d423ab551bbcc210192ee891f6a2823c2e5ccd708cb795e666b0e" exitCode=0
Oct 11 04:12:57 crc kubenswrapper[4798]: I1011 04:12:57.700467 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7c3d-account-create-f9s8l" event={"ID":"680c8968-661e-40dd-b095-190bab5e1fac","Type":"ContainerDied","Data":"1de8a781698d423ab551bbcc210192ee891f6a2823c2e5ccd708cb795e666b0e"}
Oct 11 04:12:57 crc kubenswrapper[4798]: I1011 04:12:57.700634 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7c3d-account-create-f9s8l" event={"ID":"680c8968-661e-40dd-b095-190bab5e1fac","Type":"ContainerStarted","Data":"016fc7de0681e7c06ac0280f2570a06539bab398765ce0b7ab32fd13b785364e"}
Oct 11 04:12:57 crc kubenswrapper[4798]: I1011 04:12:57.703204 4798 generic.go:334] "Generic (PLEG): container finished" podID="603093d3-dcc2-4ee2-be29-de2505f5c238" containerID="4560682c05da7184bb3fc5dedadb1f06d54929d6892f26dcdc2520647490abc5" exitCode=0
Oct 11 04:12:57 crc kubenswrapper[4798]: I1011 04:12:57.703296 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-504f-account-create-2dqzs" event={"ID":"603093d3-dcc2-4ee2-be29-de2505f5c238","Type":"ContainerDied","Data":"4560682c05da7184bb3fc5dedadb1f06d54929d6892f26dcdc2520647490abc5"}
Oct 11 04:12:57 crc kubenswrapper[4798]: I1011 04:12:57.703355 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-504f-account-create-2dqzs" event={"ID":"603093d3-dcc2-4ee2-be29-de2505f5c238","Type":"ContainerStarted","Data":"11783230100f839388967198e7ef9444a1d65ea60ae256c1c44e71b63e4cdee6"}
Oct 11 04:12:58 crc kubenswrapper[4798]: I1011 04:12:58.718301 4798 generic.go:334] "Generic (PLEG): container finished" podID="2ea903c7-d1f2-455c-80ca-e672df23712b" containerID="d4e2a2b908733935ff933bdbfadb57ba237ddcc88bbf8529a93d38e6dd027710" exitCode=0
Oct 11 04:12:58 crc kubenswrapper[4798]: I1011 04:12:58.718432 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0bfb-account-create-7nxnt" event={"ID":"2ea903c7-d1f2-455c-80ca-e672df23712b","Type":"ContainerDied","Data":"d4e2a2b908733935ff933bdbfadb57ba237ddcc88bbf8529a93d38e6dd027710"}
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.261158 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7c3d-account-create-f9s8l"
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.267898 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-504f-account-create-2dqzs"
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.362546 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mw8nx\" (UniqueName: \"kubernetes.io/projected/603093d3-dcc2-4ee2-be29-de2505f5c238-kube-api-access-mw8nx\") pod \"603093d3-dcc2-4ee2-be29-de2505f5c238\" (UID: \"603093d3-dcc2-4ee2-be29-de2505f5c238\") "
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.362809 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndmlw\" (UniqueName: \"kubernetes.io/projected/680c8968-661e-40dd-b095-190bab5e1fac-kube-api-access-ndmlw\") pod \"680c8968-661e-40dd-b095-190bab5e1fac\" (UID: \"680c8968-661e-40dd-b095-190bab5e1fac\") "
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.372995 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/680c8968-661e-40dd-b095-190bab5e1fac-kube-api-access-ndmlw" (OuterVolumeSpecName: "kube-api-access-ndmlw") pod "680c8968-661e-40dd-b095-190bab5e1fac" (UID: "680c8968-661e-40dd-b095-190bab5e1fac"). InnerVolumeSpecName "kube-api-access-ndmlw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.373175 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/603093d3-dcc2-4ee2-be29-de2505f5c238-kube-api-access-mw8nx" (OuterVolumeSpecName: "kube-api-access-mw8nx") pod "603093d3-dcc2-4ee2-be29-de2505f5c238" (UID: "603093d3-dcc2-4ee2-be29-de2505f5c238"). InnerVolumeSpecName "kube-api-access-mw8nx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.465229 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mw8nx\" (UniqueName: \"kubernetes.io/projected/603093d3-dcc2-4ee2-be29-de2505f5c238-kube-api-access-mw8nx\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.465262 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndmlw\" (UniqueName: \"kubernetes.io/projected/680c8968-661e-40dd-b095-190bab5e1fac-kube-api-access-ndmlw\") on node \"crc\" DevicePath \"\""
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.730210 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7c3d-account-create-f9s8l"
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.730245 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7c3d-account-create-f9s8l" event={"ID":"680c8968-661e-40dd-b095-190bab5e1fac","Type":"ContainerDied","Data":"016fc7de0681e7c06ac0280f2570a06539bab398765ce0b7ab32fd13b785364e"}
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.730318 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="016fc7de0681e7c06ac0280f2570a06539bab398765ce0b7ab32fd13b785364e"
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.732330 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-504f-account-create-2dqzs"
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.732323 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-504f-account-create-2dqzs" event={"ID":"603093d3-dcc2-4ee2-be29-de2505f5c238","Type":"ContainerDied","Data":"11783230100f839388967198e7ef9444a1d65ea60ae256c1c44e71b63e4cdee6"}
Oct 11 04:12:59 crc kubenswrapper[4798]: I1011 04:12:59.732372 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="11783230100f839388967198e7ef9444a1d65ea60ae256c1c44e71b63e4cdee6"
Oct 11 04:13:00 crc kubenswrapper[4798]: I1011 04:13:00.022656 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0bfb-account-create-7nxnt"
Oct 11 04:13:00 crc kubenswrapper[4798]: I1011 04:13:00.179408 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5fsm\" (UniqueName: \"kubernetes.io/projected/2ea903c7-d1f2-455c-80ca-e672df23712b-kube-api-access-j5fsm\") pod \"2ea903c7-d1f2-455c-80ca-e672df23712b\" (UID: \"2ea903c7-d1f2-455c-80ca-e672df23712b\") "
Oct 11 04:13:00 crc kubenswrapper[4798]: I1011 04:13:00.201693 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ea903c7-d1f2-455c-80ca-e672df23712b-kube-api-access-j5fsm" (OuterVolumeSpecName: "kube-api-access-j5fsm") pod "2ea903c7-d1f2-455c-80ca-e672df23712b" (UID: "2ea903c7-d1f2-455c-80ca-e672df23712b"). InnerVolumeSpecName "kube-api-access-j5fsm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:13:00 crc kubenswrapper[4798]: I1011 04:13:00.281882 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5fsm\" (UniqueName: \"kubernetes.io/projected/2ea903c7-d1f2-455c-80ca-e672df23712b-kube-api-access-j5fsm\") on node \"crc\" DevicePath \"\""
Oct 11 04:13:00 crc kubenswrapper[4798]: I1011 04:13:00.743032 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0bfb-account-create-7nxnt" event={"ID":"2ea903c7-d1f2-455c-80ca-e672df23712b","Type":"ContainerDied","Data":"cff0e38c5a75049cef1a5de2c19d1861a49f8d9a080c21af862e3a81e86f77cc"}
Oct 11 04:13:00 crc kubenswrapper[4798]: I1011 04:13:00.744126 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cff0e38c5a75049cef1a5de2c19d1861a49f8d9a080c21af862e3a81e86f77cc"
Oct 11 04:13:00 crc kubenswrapper[4798]: I1011 04:13:00.743076 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0bfb-account-create-7nxnt"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.671823 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-t4fxb"]
Oct 11 04:13:01 crc kubenswrapper[4798]: E1011 04:13:01.672431 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="603093d3-dcc2-4ee2-be29-de2505f5c238" containerName="mariadb-account-create"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.672452 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="603093d3-dcc2-4ee2-be29-de2505f5c238" containerName="mariadb-account-create"
Oct 11 04:13:01 crc kubenswrapper[4798]: E1011 04:13:01.672474 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ea903c7-d1f2-455c-80ca-e672df23712b" containerName="mariadb-account-create"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.672482 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ea903c7-d1f2-455c-80ca-e672df23712b" containerName="mariadb-account-create"
Oct 11 04:13:01 crc kubenswrapper[4798]: E1011 04:13:01.672500 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="680c8968-661e-40dd-b095-190bab5e1fac" containerName="mariadb-account-create"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.672508 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="680c8968-661e-40dd-b095-190bab5e1fac" containerName="mariadb-account-create"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.672812 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="603093d3-dcc2-4ee2-be29-de2505f5c238" containerName="mariadb-account-create"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.672831 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ea903c7-d1f2-455c-80ca-e672df23712b" containerName="mariadb-account-create"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.672851 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="680c8968-661e-40dd-b095-190bab5e1fac" containerName="mariadb-account-create"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.673850 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.678033 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.678459 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.682752 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-hfd27"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.687328 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-t4fxb"]
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.813557 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-config-data\") pod \"nova-cell0-conductor-db-sync-t4fxb\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") " pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.814052 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-scripts\") pod \"nova-cell0-conductor-db-sync-t4fxb\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") " pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.814073 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7btj\" (UniqueName: \"kubernetes.io/projected/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-kube-api-access-g7btj\") pod \"nova-cell0-conductor-db-sync-t4fxb\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") " pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.814119 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-t4fxb\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") " pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.915864 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-config-data\") pod \"nova-cell0-conductor-db-sync-t4fxb\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") " pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.915922 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-scripts\") pod \"nova-cell0-conductor-db-sync-t4fxb\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") " pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.915939 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7btj\" (UniqueName: \"kubernetes.io/projected/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-kube-api-access-g7btj\") pod \"nova-cell0-conductor-db-sync-t4fxb\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") " pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.915980 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-t4fxb\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") " pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.925350 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-config-data\") pod \"nova-cell0-conductor-db-sync-t4fxb\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") " pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.926416 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-t4fxb\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") " pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.927022 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-scripts\") pod \"nova-cell0-conductor-db-sync-t4fxb\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") " pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:01 crc kubenswrapper[4798]: I1011 04:13:01.943260 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7btj\" (UniqueName: \"kubernetes.io/projected/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-kube-api-access-g7btj\") pod \"nova-cell0-conductor-db-sync-t4fxb\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") " pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:02 crc kubenswrapper[4798]: I1011 04:13:02.009237 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:02 crc kubenswrapper[4798]: I1011 04:13:02.501351 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-t4fxb"]
Oct 11 04:13:02 crc kubenswrapper[4798]: I1011 04:13:02.761710 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-t4fxb" event={"ID":"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9","Type":"ContainerStarted","Data":"2d5e1f4908a6a8042f7765c8c86c624aaa3e714cd23fca309c4292602f756cde"}
Oct 11 04:13:11 crc kubenswrapper[4798]: I1011 04:13:11.855558 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-t4fxb" event={"ID":"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9","Type":"ContainerStarted","Data":"409eff34496e683abdf66df063cbb478d44a5e51a9ba3e4e32ab1559d476887a"}
Oct 11 04:13:11 crc kubenswrapper[4798]: I1011 04:13:11.893341 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-t4fxb" podStartSLOduration=2.059713083 podStartE2EDuration="10.89331632s" podCreationTimestamp="2025-10-11 04:13:01 +0000 UTC" firstStartedPulling="2025-10-11 04:13:02.503602712 +0000 UTC m=+1077.839892398" lastFinishedPulling="2025-10-11 04:13:11.337205949 +0000 UTC m=+1086.673495635" observedRunningTime="2025-10-11 04:13:11.879014149 +0000 UTC m=+1087.215303865" watchObservedRunningTime="2025-10-11 04:13:11.89331632 +0000 UTC m=+1087.229606016"
Oct 11 04:13:21 crc kubenswrapper[4798]: I1011 04:13:21.295182 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Oct 11 04:13:24 crc kubenswrapper[4798]: I1011 04:13:24.022358 4798 generic.go:334] "Generic (PLEG): container finished" podID="b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9" containerID="409eff34496e683abdf66df063cbb478d44a5e51a9ba3e4e32ab1559d476887a" exitCode=0
Oct 11 04:13:24 crc kubenswrapper[4798]: I1011 04:13:24.022449 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-t4fxb" event={"ID":"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9","Type":"ContainerDied","Data":"409eff34496e683abdf66df063cbb478d44a5e51a9ba3e4e32ab1559d476887a"}
Oct 11 04:13:25 crc kubenswrapper[4798]: I1011 04:13:25.390079 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:25 crc kubenswrapper[4798]: I1011 04:13:25.533500 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7btj\" (UniqueName: \"kubernetes.io/projected/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-kube-api-access-g7btj\") pod \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") "
Oct 11 04:13:25 crc kubenswrapper[4798]: I1011 04:13:25.533630 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-scripts\") pod \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") "
Oct 11 04:13:25 crc kubenswrapper[4798]: I1011 04:13:25.533816 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-config-data\") pod \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") "
Oct 11 04:13:25 crc kubenswrapper[4798]: I1011 04:13:25.535560 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-combined-ca-bundle\") pod \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\" (UID: \"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9\") "
Oct 11 04:13:25 crc kubenswrapper[4798]: I1011 04:13:25.541411 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-scripts" (OuterVolumeSpecName: "scripts") pod "b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9" (UID: "b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:13:25 crc kubenswrapper[4798]: I1011 04:13:25.541753 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-kube-api-access-g7btj" (OuterVolumeSpecName: "kube-api-access-g7btj") pod "b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9" (UID: "b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9"). InnerVolumeSpecName "kube-api-access-g7btj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:13:25 crc kubenswrapper[4798]: I1011 04:13:25.566733 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-config-data" (OuterVolumeSpecName: "config-data") pod "b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9" (UID: "b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:13:25 crc kubenswrapper[4798]: I1011 04:13:25.583787 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9" (UID: "b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:13:25 crc kubenswrapper[4798]: I1011 04:13:25.638303 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7btj\" (UniqueName: \"kubernetes.io/projected/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-kube-api-access-g7btj\") on node \"crc\" DevicePath \"\""
Oct 11 04:13:25 crc kubenswrapper[4798]: I1011 04:13:25.638339 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 04:13:25 crc kubenswrapper[4798]: I1011 04:13:25.638350 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 04:13:25 crc kubenswrapper[4798]: I1011 04:13:25.638363 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.047846 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-t4fxb" event={"ID":"b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9","Type":"ContainerDied","Data":"2d5e1f4908a6a8042f7765c8c86c624aaa3e714cd23fca309c4292602f756cde"}
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.047897 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d5e1f4908a6a8042f7765c8c86c624aaa3e714cd23fca309c4292602f756cde"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.047922 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-t4fxb"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.157328 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Oct 11 04:13:26 crc kubenswrapper[4798]: E1011 04:13:26.157874 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9" containerName="nova-cell0-conductor-db-sync"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.157900 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9" containerName="nova-cell0-conductor-db-sync"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.158119 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9" containerName="nova-cell0-conductor-db-sync"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.158936 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.161488 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.162255 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-hfd27"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.183382 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.251236 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/759110c9-4bac-404b-a088-4c0cd6c63d17-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"759110c9-4bac-404b-a088-4c0cd6c63d17\") " pod="openstack/nova-cell0-conductor-0"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.251306 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8cwb\" (UniqueName: \"kubernetes.io/projected/759110c9-4bac-404b-a088-4c0cd6c63d17-kube-api-access-s8cwb\") pod \"nova-cell0-conductor-0\" (UID: \"759110c9-4bac-404b-a088-4c0cd6c63d17\") " pod="openstack/nova-cell0-conductor-0"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.251357 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/759110c9-4bac-404b-a088-4c0cd6c63d17-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"759110c9-4bac-404b-a088-4c0cd6c63d17\") " pod="openstack/nova-cell0-conductor-0"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.353696 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/759110c9-4bac-404b-a088-4c0cd6c63d17-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"759110c9-4bac-404b-a088-4c0cd6c63d17\") " pod="openstack/nova-cell0-conductor-0"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.353799 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8cwb\" (UniqueName: \"kubernetes.io/projected/759110c9-4bac-404b-a088-4c0cd6c63d17-kube-api-access-s8cwb\") pod \"nova-cell0-conductor-0\" (UID: \"759110c9-4bac-404b-a088-4c0cd6c63d17\") " pod="openstack/nova-cell0-conductor-0"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.353867 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/759110c9-4bac-404b-a088-4c0cd6c63d17-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"759110c9-4bac-404b-a088-4c0cd6c63d17\") " pod="openstack/nova-cell0-conductor-0"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.359615 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/759110c9-4bac-404b-a088-4c0cd6c63d17-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"759110c9-4bac-404b-a088-4c0cd6c63d17\") " pod="openstack/nova-cell0-conductor-0"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.362967 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/759110c9-4bac-404b-a088-4c0cd6c63d17-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"759110c9-4bac-404b-a088-4c0cd6c63d17\") " pod="openstack/nova-cell0-conductor-0"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.377169 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8cwb\" (UniqueName: \"kubernetes.io/projected/759110c9-4bac-404b-a088-4c0cd6c63d17-kube-api-access-s8cwb\") pod \"nova-cell0-conductor-0\" (UID: \"759110c9-4bac-404b-a088-4c0cd6c63d17\") " pod="openstack/nova-cell0-conductor-0"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.476852 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Oct 11 04:13:26 crc kubenswrapper[4798]: I1011 04:13:26.790070 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Oct 11 04:13:27 crc kubenswrapper[4798]: I1011 04:13:27.063432 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"759110c9-4bac-404b-a088-4c0cd6c63d17","Type":"ContainerStarted","Data":"ea765b43bdd39c0ba0ef0c171f6909e146c6603403c8cef7024ee049da3d224a"}
Oct 11 04:13:27 crc kubenswrapper[4798]: I1011 04:13:27.063976 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"759110c9-4bac-404b-a088-4c0cd6c63d17","Type":"ContainerStarted","Data":"67c2b99fc25167d20fbf089bfe80f7778592e575c7651337fcd824e2bfa107bc"}
Oct 11 04:13:27 crc kubenswrapper[4798]: I1011 04:13:27.064042 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Oct 11 04:13:27 crc kubenswrapper[4798]: I1011 04:13:27.096588 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=1.096558337 podStartE2EDuration="1.096558337s" podCreationTimestamp="2025-10-11 04:13:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:13:27.087145124 +0000 UTC m=+1102.423434840" watchObservedRunningTime="2025-10-11 04:13:27.096558337 +0000 UTC m=+1102.432848023"
Oct 11 04:13:27 crc kubenswrapper[4798]: I1011 04:13:27.139028 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 04:13:27 crc kubenswrapper[4798]: I1011 04:13:27.139125 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 04:13:31 crc kubenswrapper[4798]: I1011 04:13:31.510158 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Oct 11 04:13:31 crc kubenswrapper[4798]: I1011 04:13:31.977950 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-tqmm5"]
Oct 11 04:13:31 crc kubenswrapper[4798]: I1011 04:13:31.979244 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:31 crc kubenswrapper[4798]: I1011 04:13:31.981541 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data"
Oct 11 04:13:31 crc kubenswrapper[4798]: I1011 04:13:31.981770 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.002820 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-tqmm5"]
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.073953 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9wck\" (UniqueName: \"kubernetes.io/projected/2ab3945b-9225-4901-a3ed-57ea8dcaf456-kube-api-access-v9wck\") pod \"nova-cell0-cell-mapping-tqmm5\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.074412 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-config-data\") pod \"nova-cell0-cell-mapping-tqmm5\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.074712 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-tqmm5\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.074850 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-scripts\") pod \"nova-cell0-cell-mapping-tqmm5\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.149634 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.150907 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.153083 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.177078 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-config-data\") pod \"nova-cell0-cell-mapping-tqmm5\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.177259 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-tqmm5\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.177301 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-scripts\") pod \"nova-cell0-cell-mapping-tqmm5\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.177416 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9wck\" (UniqueName: \"kubernetes.io/projected/2ab3945b-9225-4901-a3ed-57ea8dcaf456-kube-api-access-v9wck\") pod \"nova-cell0-cell-mapping-tqmm5\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.178286 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.185990 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-scripts\") pod \"nova-cell0-cell-mapping-tqmm5\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.195551 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-config-data\") pod \"nova-cell0-cell-mapping-tqmm5\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.196244 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-tqmm5\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.228921 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.230781 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.234417 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9wck\" (UniqueName: \"kubernetes.io/projected/2ab3945b-9225-4901-a3ed-57ea8dcaf456-kube-api-access-v9wck\") pod \"nova-cell0-cell-mapping-tqmm5\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.242837 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.271649 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.278757 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/658d57c2-d353-435e-9715-abc5c2a66d1b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"658d57c2-d353-435e-9715-abc5c2a66d1b\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.278853 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/658d57c2-d353-435e-9715-abc5c2a66d1b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"658d57c2-d353-435e-9715-abc5c2a66d1b\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.278881 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54pxx\" (UniqueName: \"kubernetes.io/projected/658d57c2-d353-435e-9715-abc5c2a66d1b-kube-api-access-54pxx\") pod \"nova-cell1-novncproxy-0\" (UID: \"658d57c2-d353-435e-9715-abc5c2a66d1b\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.306918 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tqmm5"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.333293 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.334976 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.338692 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.347426 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-zdg5l"]
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.350234 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.376921 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.382862 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a03a628-a6ad-440b-9060-6e561726da91-logs\") pod \"nova-metadata-0\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.382957 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a03a628-a6ad-440b-9060-6e561726da91-config-data\") pod \"nova-metadata-0\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.383007 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/658d57c2-d353-435e-9715-abc5c2a66d1b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"658d57c2-d353-435e-9715-abc5c2a66d1b\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.383042 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjgfr\" (UniqueName: \"kubernetes.io/projected/0a03a628-a6ad-440b-9060-6e561726da91-kube-api-access-xjgfr\") pod \"nova-metadata-0\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.383107 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/658d57c2-d353-435e-9715-abc5c2a66d1b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"658d57c2-d353-435e-9715-abc5c2a66d1b\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.383122 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54pxx\" (UniqueName: \"kubernetes.io/projected/658d57c2-d353-435e-9715-abc5c2a66d1b-kube-api-access-54pxx\") pod \"nova-cell1-novncproxy-0\" (UID: \"658d57c2-d353-435e-9715-abc5c2a66d1b\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.383185 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a03a628-a6ad-440b-9060-6e561726da91-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.393123 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/658d57c2-d353-435e-9715-abc5c2a66d1b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"658d57c2-d353-435e-9715-abc5c2a66d1b\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.399863 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/658d57c2-d353-435e-9715-abc5c2a66d1b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"658d57c2-d353-435e-9715-abc5c2a66d1b\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.420812 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54pxx\" (UniqueName: \"kubernetes.io/projected/658d57c2-d353-435e-9715-abc5c2a66d1b-kube-api-access-54pxx\") pod \"nova-cell1-novncproxy-0\" (UID: \"658d57c2-d353-435e-9715-abc5c2a66d1b\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.420897 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-zdg5l"]
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.503328 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4r2g\" (UniqueName: \"kubernetes.io/projected/31e86dba-00e8-44be-8039-05c5043c67e1-kube-api-access-j4r2g\") pod \"nova-api-0\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.503955 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31e86dba-00e8-44be-8039-05c5043c67e1-logs\") pod \"nova-api-0\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.503984 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a03a628-a6ad-440b-9060-6e561726da91-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.504011 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-ovsdbserver-sb\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.504038 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvh4l\" (UniqueName: \"kubernetes.io/projected/2b43dc17-45f9-4c69-84cb-5590a4098add-kube-api-access-dvh4l\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.504056 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31e86dba-00e8-44be-8039-05c5043c67e1-config-data\") pod \"nova-api-0\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.504082 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-config\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.504105 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-ovsdbserver-nb\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.504176 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a03a628-a6ad-440b-9060-6e561726da91-logs\") pod \"nova-metadata-0\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.504224 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a03a628-a6ad-440b-9060-6e561726da91-config-data\") pod \"nova-metadata-0\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.504242 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31e86dba-00e8-44be-8039-05c5043c67e1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.504270 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-dns-svc\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.504311 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjgfr\" (UniqueName: \"kubernetes.io/projected/0a03a628-a6ad-440b-9060-6e561726da91-kube-api-access-xjgfr\") pod \"nova-metadata-0\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.506150 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.507842 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a03a628-a6ad-440b-9060-6e561726da91-logs\") pod \"nova-metadata-0\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.513373 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a03a628-a6ad-440b-9060-6e561726da91-config-data\") pod \"nova-metadata-0\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.524738 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a03a628-a6ad-440b-9060-6e561726da91-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.550011 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjgfr\" (UniqueName: \"kubernetes.io/projected/0a03a628-a6ad-440b-9060-6e561726da91-kube-api-access-xjgfr\") pod \"nova-metadata-0\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.640637 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.642296 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31e86dba-00e8-44be-8039-05c5043c67e1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.642384 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-dns-svc\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.643446 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4r2g\" (UniqueName: \"kubernetes.io/projected/31e86dba-00e8-44be-8039-05c5043c67e1-kube-api-access-j4r2g\") pod \"nova-api-0\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.643843 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31e86dba-00e8-44be-8039-05c5043c67e1-logs\") pod \"nova-api-0\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.644284 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-ovsdbserver-sb\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.644705 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvh4l\" (UniqueName: \"kubernetes.io/projected/2b43dc17-45f9-4c69-84cb-5590a4098add-kube-api-access-dvh4l\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.645234 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31e86dba-00e8-44be-8039-05c5043c67e1-config-data\") pod \"nova-api-0\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.690980 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-config\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.702187 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-ovsdbserver-nb\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.686938 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-ovsdbserver-sb\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.686360 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-dns-svc\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.701582 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.710105 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31e86dba-00e8-44be-8039-05c5043c67e1-config-data\") pod \"nova-api-0\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.701494 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31e86dba-00e8-44be-8039-05c5043c67e1-logs\") pod \"nova-api-0\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.718850 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4r2g\" (UniqueName: \"kubernetes.io/projected/31e86dba-00e8-44be-8039-05c5043c67e1-kube-api-access-j4r2g\") pod \"nova-api-0\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.720347 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-config\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.721653 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvh4l\" (UniqueName: \"kubernetes.io/projected/2b43dc17-45f9-4c69-84cb-5590a4098add-kube-api-access-dvh4l\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.729130 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.734200 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.798431 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-ovsdbserver-nb\") pod \"dnsmasq-dns-8b8cf6657-zdg5l\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") " pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.804992 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31e86dba-00e8-44be-8039-05c5043c67e1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.826815 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.837465 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.878536 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.916449 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jrg4\" (UniqueName: \"kubernetes.io/projected/c4ba238e-3ec7-4703-862c-a1f14abda9c9-kube-api-access-6jrg4\") pod \"nova-scheduler-0\" (UID: \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\") " pod="openstack/nova-scheduler-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.916522 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4ba238e-3ec7-4703-862c-a1f14abda9c9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\") " pod="openstack/nova-scheduler-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.916591 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4ba238e-3ec7-4703-862c-a1f14abda9c9-config-data\") pod \"nova-scheduler-0\" (UID: \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\") " pod="openstack/nova-scheduler-0"
Oct 11 04:13:32 crc kubenswrapper[4798]: I1011 04:13:32.982342 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-tqmm5"]
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.018116 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jrg4\" (UniqueName: \"kubernetes.io/projected/c4ba238e-3ec7-4703-862c-a1f14abda9c9-kube-api-access-6jrg4\") pod \"nova-scheduler-0\" (UID: \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\") " pod="openstack/nova-scheduler-0"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.018192 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4ba238e-3ec7-4703-862c-a1f14abda9c9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\") " pod="openstack/nova-scheduler-0"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.018274 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4ba238e-3ec7-4703-862c-a1f14abda9c9-config-data\") pod \"nova-scheduler-0\" (UID: \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\") " pod="openstack/nova-scheduler-0"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.027126 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4ba238e-3ec7-4703-862c-a1f14abda9c9-config-data\") pod \"nova-scheduler-0\" (UID: \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\") " pod="openstack/nova-scheduler-0"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.035758 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4ba238e-3ec7-4703-862c-a1f14abda9c9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\") " pod="openstack/nova-scheduler-0"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.055767 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jrg4\" (UniqueName: \"kubernetes.io/projected/c4ba238e-3ec7-4703-862c-a1f14abda9c9-kube-api-access-6jrg4\") pod \"nova-scheduler-0\" (UID: \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\") " pod="openstack/nova-scheduler-0"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.137300 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tqmm5" event={"ID":"2ab3945b-9225-4901-a3ed-57ea8dcaf456","Type":"ContainerStarted","Data":"4e77ffef7eaed2ea7540627cdaf16421623713acec26451f9d323302da2e65b8"}
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.170201 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.267021 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.378115 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.521743 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vj9wj"]
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.523114 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.527114 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.527346 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.529849 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vj9wj"]
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.563726 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.636085 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-config-data\") pod \"nova-cell1-conductor-db-sync-vj9wj\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.636234 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vj9wj\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.636270 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-scripts\") pod \"nova-cell1-conductor-db-sync-vj9wj\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.636306 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xfdl\" (UniqueName: \"kubernetes.io/projected/2da346d6-33cf-4000-8cf9-f269b86bda5d-kube-api-access-5xfdl\") pod \"nova-cell1-conductor-db-sync-vj9wj\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.738053 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vj9wj\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.738100 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-scripts\") pod \"nova-cell1-conductor-db-sync-vj9wj\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.738138 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xfdl\" (UniqueName: \"kubernetes.io/projected/2da346d6-33cf-4000-8cf9-f269b86bda5d-kube-api-access-5xfdl\") pod \"nova-cell1-conductor-db-sync-vj9wj\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.738209 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-config-data\") pod \"nova-cell1-conductor-db-sync-vj9wj\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.745594 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-config-data\") pod \"nova-cell1-conductor-db-sync-vj9wj\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.747971 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vj9wj\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.753103 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-scripts\") pod \"nova-cell1-conductor-db-sync-vj9wj\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.757035 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.762209 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xfdl\" (UniqueName: \"kubernetes.io/projected/2da346d6-33cf-4000-8cf9-f269b86bda5d-kube-api-access-5xfdl\") pod \"nova-cell1-conductor-db-sync-vj9wj\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.776297 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-zdg5l"]
Oct 11 04:13:33 crc kubenswrapper[4798]: I1011 04:13:33.861114 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
Oct 11 04:13:34 crc kubenswrapper[4798]: I1011 04:13:34.153023 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"658d57c2-d353-435e-9715-abc5c2a66d1b","Type":"ContainerStarted","Data":"9f87fb84517092d246016bfc37a989be01223117aab7de7aa0023b00738884c0"}
Oct 11 04:13:34 crc kubenswrapper[4798]: I1011 04:13:34.155867 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31e86dba-00e8-44be-8039-05c5043c67e1","Type":"ContainerStarted","Data":"c1e847f1a6c50fff69b893875f7905b5bf7247cb7e8192ea557854ab446fdff5"}
Oct 11 04:13:34 crc kubenswrapper[4798]: I1011 04:13:34.159662 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c4ba238e-3ec7-4703-862c-a1f14abda9c9","Type":"ContainerStarted","Data":"3cbc1cddafb0d0adb7881b49de4a92268dea769dbd8f40f6b6084b6c0747f614"}
Oct 11 04:13:34 crc kubenswrapper[4798]: I1011 04:13:34.162361 4798 generic.go:334] "Generic (PLEG): container finished" podID="2b43dc17-45f9-4c69-84cb-5590a4098add" containerID="13a26e002a8fced191e8e9f1273dc82d079919ede9b65cd71ca238c5bacd40e8" exitCode=0
Oct 11 04:13:34 crc kubenswrapper[4798]: I1011 04:13:34.162452 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l" event={"ID":"2b43dc17-45f9-4c69-84cb-5590a4098add","Type":"ContainerDied","Data":"13a26e002a8fced191e8e9f1273dc82d079919ede9b65cd71ca238c5bacd40e8"}
Oct 11 04:13:34 crc kubenswrapper[4798]: I1011 04:13:34.162476 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l" event={"ID":"2b43dc17-45f9-4c69-84cb-5590a4098add","Type":"ContainerStarted","Data":"177be28dfade2e70f43f82cb5272f7bc1a5c98bf543ea5205b1de90ac0031be2"}
Oct 11 04:13:34 crc kubenswrapper[4798]: I1011 04:13:34.172644 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tqmm5" event={"ID":"2ab3945b-9225-4901-a3ed-57ea8dcaf456","Type":"ContainerStarted","Data":"0ab241daf52edb119b3355fdc6d0a8cc66a5baedfe949bad0a95c5b1529d7866"}
Oct 11 04:13:34 crc kubenswrapper[4798]: I1011 04:13:34.175626 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0a03a628-a6ad-440b-9060-6e561726da91","Type":"ContainerStarted","Data":"5bdfe37704aba889d9197eae37c3576ded48a12b012d9ae1919c1f2df649008f"}
Oct 11 04:13:34 crc kubenswrapper[4798]: I1011 04:13:34.210116 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-tqmm5" podStartSLOduration=3.210090449 podStartE2EDuration="3.210090449s" podCreationTimestamp="2025-10-11 04:13:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:13:34.204811603 +0000 UTC m=+1109.541101299" watchObservedRunningTime="2025-10-11 04:13:34.210090449 +0000 UTC m=+1109.546380135"
Oct 11 04:13:34 crc kubenswrapper[4798]: I1011 04:13:34.410542 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vj9wj"]
Oct 11 04:13:35 crc kubenswrapper[4798]: I1011 04:13:35.209635 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vj9wj"
event={"ID":"2da346d6-33cf-4000-8cf9-f269b86bda5d","Type":"ContainerStarted","Data":"14b5a6837034b5a81eac5c1851fe2b0fed4bb2911bc051c6ae5cf941236cbc05"} Oct 11 04:13:35 crc kubenswrapper[4798]: I1011 04:13:35.210183 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vj9wj" event={"ID":"2da346d6-33cf-4000-8cf9-f269b86bda5d","Type":"ContainerStarted","Data":"e6b9b5de953b5ed3490c5b502959940bc8d50d234082bbfe95aad936c06d24af"} Oct 11 04:13:35 crc kubenswrapper[4798]: I1011 04:13:35.225442 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l" event={"ID":"2b43dc17-45f9-4c69-84cb-5590a4098add","Type":"ContainerStarted","Data":"1b3374b1b370c07ed3cfc864fe3a8059fc5425c371e82d16ec7722e5cd1043fa"} Oct 11 04:13:35 crc kubenswrapper[4798]: I1011 04:13:35.225518 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l" Oct 11 04:13:35 crc kubenswrapper[4798]: I1011 04:13:35.242707 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-vj9wj" podStartSLOduration=2.242685125 podStartE2EDuration="2.242685125s" podCreationTimestamp="2025-10-11 04:13:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:13:35.228708232 +0000 UTC m=+1110.564997918" watchObservedRunningTime="2025-10-11 04:13:35.242685125 +0000 UTC m=+1110.578974811" Oct 11 04:13:35 crc kubenswrapper[4798]: I1011 04:13:35.251763 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l" podStartSLOduration=3.2517464609999998 podStartE2EDuration="3.251746461s" podCreationTimestamp="2025-10-11 04:13:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:13:35.245674006 +0000 UTC m=+1110.581963692" watchObservedRunningTime="2025-10-11 04:13:35.251746461 +0000 UTC m=+1110.588036147" Oct 11 04:13:35 crc kubenswrapper[4798]: I1011 04:13:35.933482 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 11 04:13:35 crc kubenswrapper[4798]: I1011 04:13:35.950434 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.269318 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0a03a628-a6ad-440b-9060-6e561726da91","Type":"ContainerStarted","Data":"c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784"} Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.269994 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0a03a628-a6ad-440b-9060-6e561726da91","Type":"ContainerStarted","Data":"f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7"} Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.269661 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0a03a628-a6ad-440b-9060-6e561726da91" containerName="nova-metadata-metadata" containerID="cri-o://c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784" gracePeriod=30 Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.269559 4798 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/nova-metadata-0" podUID="0a03a628-a6ad-440b-9060-6e561726da91" containerName="nova-metadata-log" containerID="cri-o://f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7" gracePeriod=30 Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.273261 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"658d57c2-d353-435e-9715-abc5c2a66d1b","Type":"ContainerStarted","Data":"540726f5d6dd87652fc5999dc34e04212fe67ea741fd00ebf0ce7d461f02dd3b"} Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.273461 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="658d57c2-d353-435e-9715-abc5c2a66d1b" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://540726f5d6dd87652fc5999dc34e04212fe67ea741fd00ebf0ce7d461f02dd3b" gracePeriod=30 Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.278592 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31e86dba-00e8-44be-8039-05c5043c67e1","Type":"ContainerStarted","Data":"872af61a38b33c8ac361866d9e9ed48f138aadb7251adfa87def45ce6d66fc18"} Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.278650 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31e86dba-00e8-44be-8039-05c5043c67e1","Type":"ContainerStarted","Data":"19acbc77625344bd76a48bc2a83efb20a94acd91cf5b3329ccd15ce284b91824"} Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.285122 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c4ba238e-3ec7-4703-862c-a1f14abda9c9","Type":"ContainerStarted","Data":"20ab35a996cf54a1a9321e38b400af1b14c71359b63cdf0a94c3b9aa76e08dc4"} Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.309462 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.275655221 podStartE2EDuration="7.309440064s" podCreationTimestamp="2025-10-11 04:13:32 +0000 UTC" firstStartedPulling="2025-10-11 04:13:33.387848274 +0000 UTC m=+1108.724137980" lastFinishedPulling="2025-10-11 04:13:38.421633137 +0000 UTC m=+1113.757922823" observedRunningTime="2025-10-11 04:13:39.304099916 +0000 UTC m=+1114.640389602" watchObservedRunningTime="2025-10-11 04:13:39.309440064 +0000 UTC m=+1114.645729750" Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.328265 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.193146396 podStartE2EDuration="7.328247771s" podCreationTimestamp="2025-10-11 04:13:32 +0000 UTC" firstStartedPulling="2025-10-11 04:13:33.288457527 +0000 UTC m=+1108.624747223" lastFinishedPulling="2025-10-11 04:13:38.423558892 +0000 UTC m=+1113.759848598" observedRunningTime="2025-10-11 04:13:39.32524961 +0000 UTC m=+1114.661539296" watchObservedRunningTime="2025-10-11 04:13:39.328247771 +0000 UTC m=+1114.664537457" Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.355318 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.701972736 podStartE2EDuration="7.355300026s" podCreationTimestamp="2025-10-11 04:13:32 +0000 UTC" firstStartedPulling="2025-10-11 04:13:33.774539345 +0000 UTC m=+1109.110829021" lastFinishedPulling="2025-10-11 04:13:38.427866625 +0000 UTC m=+1113.764156311" observedRunningTime="2025-10-11 04:13:39.346298561 +0000 UTC 
m=+1114.682588247" watchObservedRunningTime="2025-10-11 04:13:39.355300026 +0000 UTC m=+1114.691589712" Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.373990 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.524427416 podStartE2EDuration="7.373966201s" podCreationTimestamp="2025-10-11 04:13:32 +0000 UTC" firstStartedPulling="2025-10-11 04:13:33.575119934 +0000 UTC m=+1108.911409620" lastFinishedPulling="2025-10-11 04:13:38.424658719 +0000 UTC m=+1113.760948405" observedRunningTime="2025-10-11 04:13:39.365355686 +0000 UTC m=+1114.701645372" watchObservedRunningTime="2025-10-11 04:13:39.373966201 +0000 UTC m=+1114.710255887" Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.884719 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.989492 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xjgfr\" (UniqueName: \"kubernetes.io/projected/0a03a628-a6ad-440b-9060-6e561726da91-kube-api-access-xjgfr\") pod \"0a03a628-a6ad-440b-9060-6e561726da91\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.989789 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a03a628-a6ad-440b-9060-6e561726da91-logs\") pod \"0a03a628-a6ad-440b-9060-6e561726da91\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.989830 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a03a628-a6ad-440b-9060-6e561726da91-combined-ca-bundle\") pod \"0a03a628-a6ad-440b-9060-6e561726da91\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.989874 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a03a628-a6ad-440b-9060-6e561726da91-config-data\") pod \"0a03a628-a6ad-440b-9060-6e561726da91\" (UID: \"0a03a628-a6ad-440b-9060-6e561726da91\") " Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.990374 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a03a628-a6ad-440b-9060-6e561726da91-logs" (OuterVolumeSpecName: "logs") pod "0a03a628-a6ad-440b-9060-6e561726da91" (UID: "0a03a628-a6ad-440b-9060-6e561726da91"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.990775 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a03a628-a6ad-440b-9060-6e561726da91-logs\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:39 crc kubenswrapper[4798]: I1011 04:13:39.996620 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a03a628-a6ad-440b-9060-6e561726da91-kube-api-access-xjgfr" (OuterVolumeSpecName: "kube-api-access-xjgfr") pod "0a03a628-a6ad-440b-9060-6e561726da91" (UID: "0a03a628-a6ad-440b-9060-6e561726da91"). InnerVolumeSpecName "kube-api-access-xjgfr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.028003 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a03a628-a6ad-440b-9060-6e561726da91-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a03a628-a6ad-440b-9060-6e561726da91" (UID: "0a03a628-a6ad-440b-9060-6e561726da91"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.033478 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a03a628-a6ad-440b-9060-6e561726da91-config-data" (OuterVolumeSpecName: "config-data") pod "0a03a628-a6ad-440b-9060-6e561726da91" (UID: "0a03a628-a6ad-440b-9060-6e561726da91"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.092585 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xjgfr\" (UniqueName: \"kubernetes.io/projected/0a03a628-a6ad-440b-9060-6e561726da91-kube-api-access-xjgfr\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.092625 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a03a628-a6ad-440b-9060-6e561726da91-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.092637 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a03a628-a6ad-440b-9060-6e561726da91-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.300208 4798 generic.go:334] "Generic (PLEG): container finished" podID="0a03a628-a6ad-440b-9060-6e561726da91" containerID="c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784" exitCode=0 Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.300274 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.300322 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0a03a628-a6ad-440b-9060-6e561726da91","Type":"ContainerDied","Data":"c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784"} Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.300457 4798 generic.go:334] "Generic (PLEG): container finished" podID="0a03a628-a6ad-440b-9060-6e561726da91" containerID="f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7" exitCode=143 Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.300613 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0a03a628-a6ad-440b-9060-6e561726da91","Type":"ContainerDied","Data":"f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7"} Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.300656 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0a03a628-a6ad-440b-9060-6e561726da91","Type":"ContainerDied","Data":"5bdfe37704aba889d9197eae37c3576ded48a12b012d9ae1919c1f2df649008f"} Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.300688 4798 scope.go:117] "RemoveContainer" containerID="c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.335436 4798 scope.go:117] "RemoveContainer" containerID="f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.366158 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.373029 4798 scope.go:117] "RemoveContainer" containerID="c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.378648 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:13:40 crc kubenswrapper[4798]: E1011 04:13:40.380770 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784\": container with ID starting with c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784 not found: ID does not exist" containerID="c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.380818 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784"} err="failed to get container status \"c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784\": rpc error: code = NotFound desc = could not find container \"c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784\": container with ID starting with c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784 not found: ID does not exist" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.380853 4798 scope.go:117] "RemoveContainer" containerID="f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7" Oct 11 04:13:40 crc kubenswrapper[4798]: E1011 04:13:40.381345 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7\": 
container with ID starting with f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7 not found: ID does not exist" containerID="f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.381377 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7"} err="failed to get container status \"f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7\": rpc error: code = NotFound desc = could not find container \"f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7\": container with ID starting with f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7 not found: ID does not exist" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.381405 4798 scope.go:117] "RemoveContainer" containerID="c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.381618 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784"} err="failed to get container status \"c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784\": rpc error: code = NotFound desc = could not find container \"c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784\": container with ID starting with c9b33e786c77b0bf2ca4090d1343706321563f21a36ac0160c23f08366a13784 not found: ID does not exist" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.381644 4798 scope.go:117] "RemoveContainer" containerID="f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.381900 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7"} err="failed to get container status \"f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7\": rpc error: code = NotFound desc = could not find container \"f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7\": container with ID starting with f1110789bcb3957d71528ee93b6254f58077913089d36bef4130e9b602c660b7 not found: ID does not exist" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.390554 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:13:40 crc kubenswrapper[4798]: E1011 04:13:40.391040 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a03a628-a6ad-440b-9060-6e561726da91" containerName="nova-metadata-log" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.391055 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a03a628-a6ad-440b-9060-6e561726da91" containerName="nova-metadata-log" Oct 11 04:13:40 crc kubenswrapper[4798]: E1011 04:13:40.391092 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a03a628-a6ad-440b-9060-6e561726da91" containerName="nova-metadata-metadata" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.391101 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a03a628-a6ad-440b-9060-6e561726da91" containerName="nova-metadata-metadata" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.391328 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a03a628-a6ad-440b-9060-6e561726da91" containerName="nova-metadata-metadata" Oct 11 04:13:40 crc kubenswrapper[4798]: 
I1011 04:13:40.391355 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a03a628-a6ad-440b-9060-6e561726da91" containerName="nova-metadata-log" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.393281 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.403331 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.403526 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.428873 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.506742 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-config-data\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.506865 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdfgq\" (UniqueName: \"kubernetes.io/projected/61518c57-9198-4d3a-b58f-cf2a2de9813d-kube-api-access-xdfgq\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.506906 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.506931 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.506978 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61518c57-9198-4d3a-b58f-cf2a2de9813d-logs\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.609185 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-config-data\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.609649 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdfgq\" (UniqueName: \"kubernetes.io/projected/61518c57-9198-4d3a-b58f-cf2a2de9813d-kube-api-access-xdfgq\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.610196 4798 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.610883 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.611422 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61518c57-9198-4d3a-b58f-cf2a2de9813d-logs\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.612070 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61518c57-9198-4d3a-b58f-cf2a2de9813d-logs\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.614678 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.615240 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.616039 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-config-data\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.626965 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdfgq\" (UniqueName: \"kubernetes.io/projected/61518c57-9198-4d3a-b58f-cf2a2de9813d-kube-api-access-xdfgq\") pod \"nova-metadata-0\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " pod="openstack/nova-metadata-0" Oct 11 04:13:40 crc kubenswrapper[4798]: I1011 04:13:40.734767 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 04:13:41 crc kubenswrapper[4798]: I1011 04:13:41.219903 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:13:41 crc kubenswrapper[4798]: I1011 04:13:41.314540 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"61518c57-9198-4d3a-b58f-cf2a2de9813d","Type":"ContainerStarted","Data":"078771b097f7e4cf6a12268b87b49a022c7da1bfe7ef2930fc090fbfe6353528"} Oct 11 04:13:41 crc kubenswrapper[4798]: I1011 04:13:41.439610 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a03a628-a6ad-440b-9060-6e561726da91" path="/var/lib/kubelet/pods/0a03a628-a6ad-440b-9060-6e561726da91/volumes" Oct 11 04:13:42 crc kubenswrapper[4798]: I1011 04:13:42.328490 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"61518c57-9198-4d3a-b58f-cf2a2de9813d","Type":"ContainerStarted","Data":"d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52"} Oct 11 04:13:42 crc kubenswrapper[4798]: I1011 04:13:42.328994 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"61518c57-9198-4d3a-b58f-cf2a2de9813d","Type":"ContainerStarted","Data":"506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235"} Oct 11 04:13:42 crc kubenswrapper[4798]: I1011 04:13:42.330292 4798 generic.go:334] "Generic (PLEG): container finished" podID="2ab3945b-9225-4901-a3ed-57ea8dcaf456" containerID="0ab241daf52edb119b3355fdc6d0a8cc66a5baedfe949bad0a95c5b1529d7866" exitCode=0 Oct 11 04:13:42 crc kubenswrapper[4798]: I1011 04:13:42.330352 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tqmm5" event={"ID":"2ab3945b-9225-4901-a3ed-57ea8dcaf456","Type":"ContainerDied","Data":"0ab241daf52edb119b3355fdc6d0a8cc66a5baedfe949bad0a95c5b1529d7866"} Oct 11 04:13:42 crc kubenswrapper[4798]: I1011 04:13:42.353836 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.353808279 podStartE2EDuration="2.353808279s" podCreationTimestamp="2025-10-11 04:13:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:13:42.349899986 +0000 UTC m=+1117.686189692" watchObservedRunningTime="2025-10-11 04:13:42.353808279 +0000 UTC m=+1117.690097975" Oct 11 04:13:42 crc kubenswrapper[4798]: I1011 04:13:42.506826 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:13:42 crc kubenswrapper[4798]: I1011 04:13:42.828707 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 11 04:13:42 crc kubenswrapper[4798]: I1011 04:13:42.828773 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 11 04:13:42 crc kubenswrapper[4798]: I1011 04:13:42.880765 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l" Oct 11 04:13:42 crc kubenswrapper[4798]: I1011 04:13:42.990239 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-zfln8"] Oct 11 04:13:42 crc kubenswrapper[4798]: I1011 04:13:42.990966 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-58db5546cc-zfln8" 
podUID="c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" containerName="dnsmasq-dns" containerID="cri-o://2e80eb6cff4b8143ea305b056d223b400a392f6ca6eb1d542cba8dd6413cc8c5" gracePeriod=10 Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.170845 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.170932 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.207134 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.340387 4798 generic.go:334] "Generic (PLEG): container finished" podID="2da346d6-33cf-4000-8cf9-f269b86bda5d" containerID="14b5a6837034b5a81eac5c1851fe2b0fed4bb2911bc051c6ae5cf941236cbc05" exitCode=0 Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.340510 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vj9wj" event={"ID":"2da346d6-33cf-4000-8cf9-f269b86bda5d","Type":"ContainerDied","Data":"14b5a6837034b5a81eac5c1851fe2b0fed4bb2911bc051c6ae5cf941236cbc05"} Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.354447 4798 generic.go:334] "Generic (PLEG): container finished" podID="c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" containerID="2e80eb6cff4b8143ea305b056d223b400a392f6ca6eb1d542cba8dd6413cc8c5" exitCode=0 Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.354506 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-zfln8" event={"ID":"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78","Type":"ContainerDied","Data":"2e80eb6cff4b8143ea305b056d223b400a392f6ca6eb1d542cba8dd6413cc8c5"} Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.438130 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.525061 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.582569 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-ovsdbserver-sb\") pod \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.582685 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-ovsdbserver-nb\") pod \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.582902 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-config\") pod \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.583088 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l77w8\" (UniqueName: \"kubernetes.io/projected/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-kube-api-access-l77w8\") pod \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.583314 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-dns-svc\") pod \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\" (UID: \"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78\") " Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.616706 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-kube-api-access-l77w8" (OuterVolumeSpecName: "kube-api-access-l77w8") pod "c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" (UID: "c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78"). InnerVolumeSpecName "kube-api-access-l77w8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.658633 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" (UID: "c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.667267 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-config" (OuterVolumeSpecName: "config") pod "c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" (UID: "c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.667362 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" (UID: "c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.692986 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l77w8\" (UniqueName: \"kubernetes.io/projected/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-kube-api-access-l77w8\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.693097 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.693109 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.693122 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.698526 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" (UID: "c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.717703 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tqmm5" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.793970 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-config-data\") pod \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.794135 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-combined-ca-bundle\") pod \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.794204 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v9wck\" (UniqueName: \"kubernetes.io/projected/2ab3945b-9225-4901-a3ed-57ea8dcaf456-kube-api-access-v9wck\") pod \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.794337 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-scripts\") pod \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\" (UID: \"2ab3945b-9225-4901-a3ed-57ea8dcaf456\") " Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.795297 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.798433 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-scripts" (OuterVolumeSpecName: "scripts") pod "2ab3945b-9225-4901-a3ed-57ea8dcaf456" (UID: "2ab3945b-9225-4901-a3ed-57ea8dcaf456"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.799920 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ab3945b-9225-4901-a3ed-57ea8dcaf456-kube-api-access-v9wck" (OuterVolumeSpecName: "kube-api-access-v9wck") pod "2ab3945b-9225-4901-a3ed-57ea8dcaf456" (UID: "2ab3945b-9225-4901-a3ed-57ea8dcaf456"). InnerVolumeSpecName "kube-api-access-v9wck". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.825521 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-config-data" (OuterVolumeSpecName: "config-data") pod "2ab3945b-9225-4901-a3ed-57ea8dcaf456" (UID: "2ab3945b-9225-4901-a3ed-57ea8dcaf456"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.827708 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2ab3945b-9225-4901-a3ed-57ea8dcaf456" (UID: "2ab3945b-9225-4901-a3ed-57ea8dcaf456"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.897366 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.897434 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.897445 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ab3945b-9225-4901-a3ed-57ea8dcaf456-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.897456 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v9wck\" (UniqueName: \"kubernetes.io/projected/2ab3945b-9225-4901-a3ed-57ea8dcaf456-kube-api-access-v9wck\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.910670 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="31e86dba-00e8-44be-8039-05c5043c67e1" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.172:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 11 04:13:43 crc kubenswrapper[4798]: I1011 04:13:43.910685 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="31e86dba-00e8-44be-8039-05c5043c67e1" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.172:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.365914 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell0-cell-mapping-tqmm5" event={"ID":"2ab3945b-9225-4901-a3ed-57ea8dcaf456","Type":"ContainerDied","Data":"4e77ffef7eaed2ea7540627cdaf16421623713acec26451f9d323302da2e65b8"} Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.366353 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e77ffef7eaed2ea7540627cdaf16421623713acec26451f9d323302da2e65b8" Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.365995 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tqmm5" Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.368409 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-zfln8" event={"ID":"c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78","Type":"ContainerDied","Data":"e22fef8e8bf92132ae51a020b929b4b538b983a4c36722ddee2a60b6cb265c28"} Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.368439 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58db5546cc-zfln8" Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.368501 4798 scope.go:117] "RemoveContainer" containerID="2e80eb6cff4b8143ea305b056d223b400a392f6ca6eb1d542cba8dd6413cc8c5" Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.438691 4798 scope.go:117] "RemoveContainer" containerID="67a08c46e15345ff800acdbe8a08dc3819ebe59e5e99c464d309b6f23c60a671" Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.485992 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-zfln8"] Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.495258 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-zfln8"] Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.582656 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.583008 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="31e86dba-00e8-44be-8039-05c5043c67e1" containerName="nova-api-log" containerID="cri-o://19acbc77625344bd76a48bc2a83efb20a94acd91cf5b3329ccd15ce284b91824" gracePeriod=30 Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.583678 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="31e86dba-00e8-44be-8039-05c5043c67e1" containerName="nova-api-api" containerID="cri-o://872af61a38b33c8ac361866d9e9ed48f138aadb7251adfa87def45ce6d66fc18" gracePeriod=30 Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.599033 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.605740 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.606006 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="61518c57-9198-4d3a-b58f-cf2a2de9813d" containerName="nova-metadata-log" containerID="cri-o://506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235" gracePeriod=30 Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.606556 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="61518c57-9198-4d3a-b58f-cf2a2de9813d" containerName="nova-metadata-metadata" 
containerID="cri-o://d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52" gracePeriod=30 Oct 11 04:13:44 crc kubenswrapper[4798]: I1011 04:13:44.894356 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vj9wj" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.049516 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-config-data\") pod \"2da346d6-33cf-4000-8cf9-f269b86bda5d\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.049644 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xfdl\" (UniqueName: \"kubernetes.io/projected/2da346d6-33cf-4000-8cf9-f269b86bda5d-kube-api-access-5xfdl\") pod \"2da346d6-33cf-4000-8cf9-f269b86bda5d\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.049718 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-scripts\") pod \"2da346d6-33cf-4000-8cf9-f269b86bda5d\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.049825 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-combined-ca-bundle\") pod \"2da346d6-33cf-4000-8cf9-f269b86bda5d\" (UID: \"2da346d6-33cf-4000-8cf9-f269b86bda5d\") " Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.075309 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2da346d6-33cf-4000-8cf9-f269b86bda5d-kube-api-access-5xfdl" (OuterVolumeSpecName: "kube-api-access-5xfdl") pod "2da346d6-33cf-4000-8cf9-f269b86bda5d" (UID: "2da346d6-33cf-4000-8cf9-f269b86bda5d"). InnerVolumeSpecName "kube-api-access-5xfdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.075550 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-scripts" (OuterVolumeSpecName: "scripts") pod "2da346d6-33cf-4000-8cf9-f269b86bda5d" (UID: "2da346d6-33cf-4000-8cf9-f269b86bda5d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.154772 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xfdl\" (UniqueName: \"kubernetes.io/projected/2da346d6-33cf-4000-8cf9-f269b86bda5d-kube-api-access-5xfdl\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.154807 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.157605 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-config-data" (OuterVolumeSpecName: "config-data") pod "2da346d6-33cf-4000-8cf9-f269b86bda5d" (UID: "2da346d6-33cf-4000-8cf9-f269b86bda5d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.177834 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2da346d6-33cf-4000-8cf9-f269b86bda5d" (UID: "2da346d6-33cf-4000-8cf9-f269b86bda5d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.257301 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.257340 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2da346d6-33cf-4000-8cf9-f269b86bda5d-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.382863 4798 generic.go:334] "Generic (PLEG): container finished" podID="31e86dba-00e8-44be-8039-05c5043c67e1" containerID="19acbc77625344bd76a48bc2a83efb20a94acd91cf5b3329ccd15ce284b91824" exitCode=143 Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.382954 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31e86dba-00e8-44be-8039-05c5043c67e1","Type":"ContainerDied","Data":"19acbc77625344bd76a48bc2a83efb20a94acd91cf5b3329ccd15ce284b91824"} Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.384125 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.387728 4798 generic.go:334] "Generic (PLEG): container finished" podID="61518c57-9198-4d3a-b58f-cf2a2de9813d" containerID="d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52" exitCode=0 Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.387759 4798 generic.go:334] "Generic (PLEG): container finished" podID="61518c57-9198-4d3a-b58f-cf2a2de9813d" containerID="506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235" exitCode=143 Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.387802 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"61518c57-9198-4d3a-b58f-cf2a2de9813d","Type":"ContainerDied","Data":"d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52"} Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.387832 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"61518c57-9198-4d3a-b58f-cf2a2de9813d","Type":"ContainerDied","Data":"506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235"} Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.387842 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"61518c57-9198-4d3a-b58f-cf2a2de9813d","Type":"ContainerDied","Data":"078771b097f7e4cf6a12268b87b49a022c7da1bfe7ef2930fc090fbfe6353528"} Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.387876 4798 scope.go:117] "RemoveContainer" containerID="d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.394203 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vj9wj" 
event={"ID":"2da346d6-33cf-4000-8cf9-f269b86bda5d","Type":"ContainerDied","Data":"e6b9b5de953b5ed3490c5b502959940bc8d50d234082bbfe95aad936c06d24af"} Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.394240 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="c4ba238e-3ec7-4703-862c-a1f14abda9c9" containerName="nova-scheduler-scheduler" containerID="cri-o://20ab35a996cf54a1a9321e38b400af1b14c71359b63cdf0a94c3b9aa76e08dc4" gracePeriod=30 Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.394296 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e6b9b5de953b5ed3490c5b502959940bc8d50d234082bbfe95aad936c06d24af" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.394222 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vj9wj" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.461196 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-combined-ca-bundle\") pod \"61518c57-9198-4d3a-b58f-cf2a2de9813d\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.461354 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-nova-metadata-tls-certs\") pod \"61518c57-9198-4d3a-b58f-cf2a2de9813d\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.461502 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdfgq\" (UniqueName: \"kubernetes.io/projected/61518c57-9198-4d3a-b58f-cf2a2de9813d-kube-api-access-xdfgq\") pod \"61518c57-9198-4d3a-b58f-cf2a2de9813d\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.461616 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61518c57-9198-4d3a-b58f-cf2a2de9813d-logs\") pod \"61518c57-9198-4d3a-b58f-cf2a2de9813d\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.461662 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-config-data\") pod \"61518c57-9198-4d3a-b58f-cf2a2de9813d\" (UID: \"61518c57-9198-4d3a-b58f-cf2a2de9813d\") " Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.465133 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61518c57-9198-4d3a-b58f-cf2a2de9813d-logs" (OuterVolumeSpecName: "logs") pod "61518c57-9198-4d3a-b58f-cf2a2de9813d" (UID: "61518c57-9198-4d3a-b58f-cf2a2de9813d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.474082 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61518c57-9198-4d3a-b58f-cf2a2de9813d-kube-api-access-xdfgq" (OuterVolumeSpecName: "kube-api-access-xdfgq") pod "61518c57-9198-4d3a-b58f-cf2a2de9813d" (UID: "61518c57-9198-4d3a-b58f-cf2a2de9813d"). InnerVolumeSpecName "kube-api-access-xdfgq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.482562 4798 scope.go:117] "RemoveContainer" containerID="506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.484020 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" path="/var/lib/kubelet/pods/c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78/volumes" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.494404 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Oct 11 04:13:45 crc kubenswrapper[4798]: E1011 04:13:45.494848 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61518c57-9198-4d3a-b58f-cf2a2de9813d" containerName="nova-metadata-metadata" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.494869 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="61518c57-9198-4d3a-b58f-cf2a2de9813d" containerName="nova-metadata-metadata" Oct 11 04:13:45 crc kubenswrapper[4798]: E1011 04:13:45.494884 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2da346d6-33cf-4000-8cf9-f269b86bda5d" containerName="nova-cell1-conductor-db-sync" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.494890 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2da346d6-33cf-4000-8cf9-f269b86bda5d" containerName="nova-cell1-conductor-db-sync" Oct 11 04:13:45 crc kubenswrapper[4798]: E1011 04:13:45.494906 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" containerName="dnsmasq-dns" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.494912 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" containerName="dnsmasq-dns" Oct 11 04:13:45 crc kubenswrapper[4798]: E1011 04:13:45.494931 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ab3945b-9225-4901-a3ed-57ea8dcaf456" containerName="nova-manage" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.494938 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ab3945b-9225-4901-a3ed-57ea8dcaf456" containerName="nova-manage" Oct 11 04:13:45 crc kubenswrapper[4798]: E1011 04:13:45.494955 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" containerName="init" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.494960 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" containerName="init" Oct 11 04:13:45 crc kubenswrapper[4798]: E1011 04:13:45.494972 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61518c57-9198-4d3a-b58f-cf2a2de9813d" containerName="nova-metadata-log" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.494978 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="61518c57-9198-4d3a-b58f-cf2a2de9813d" containerName="nova-metadata-log" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.495131 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="61518c57-9198-4d3a-b58f-cf2a2de9813d" containerName="nova-metadata-log" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.495141 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="2da346d6-33cf-4000-8cf9-f269b86bda5d" containerName="nova-cell1-conductor-db-sync" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.495153 4798 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="2ab3945b-9225-4901-a3ed-57ea8dcaf456" containerName="nova-manage" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.495164 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2b0d4f5-de54-44c6-9c2c-c4285ffbbe78" containerName="dnsmasq-dns" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.495175 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="61518c57-9198-4d3a-b58f-cf2a2de9813d" containerName="nova-metadata-metadata" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.497849 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.504105 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.506486 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "61518c57-9198-4d3a-b58f-cf2a2de9813d" (UID: "61518c57-9198-4d3a-b58f-cf2a2de9813d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.528826 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.539130 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-config-data" (OuterVolumeSpecName: "config-data") pod "61518c57-9198-4d3a-b58f-cf2a2de9813d" (UID: "61518c57-9198-4d3a-b58f-cf2a2de9813d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.544067 4798 scope.go:117] "RemoveContainer" containerID="d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52" Oct 11 04:13:45 crc kubenswrapper[4798]: E1011 04:13:45.545590 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52\": container with ID starting with d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52 not found: ID does not exist" containerID="d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.545653 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52"} err="failed to get container status \"d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52\": rpc error: code = NotFound desc = could not find container \"d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52\": container with ID starting with d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52 not found: ID does not exist" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.545695 4798 scope.go:117] "RemoveContainer" containerID="506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235" Oct 11 04:13:45 crc kubenswrapper[4798]: E1011 04:13:45.546055 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235\": container with ID starting with 506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235 not found: ID does not exist" containerID="506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.546109 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235"} err="failed to get container status \"506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235\": rpc error: code = NotFound desc = could not find container \"506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235\": container with ID starting with 506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235 not found: ID does not exist" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.546141 4798 scope.go:117] "RemoveContainer" containerID="d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.546467 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52"} err="failed to get container status \"d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52\": rpc error: code = NotFound desc = could not find container \"d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52\": container with ID starting with d2b56f188bd48bda3f79f6022d62e24e1875a318800b8be9e99d9e708385ec52 not found: ID does not exist" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.546494 4798 scope.go:117] "RemoveContainer" containerID="506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.546880 4798 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235"} err="failed to get container status \"506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235\": rpc error: code = NotFound desc = could not find container \"506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235\": container with ID starting with 506a7f3615d3655b41ee26ecc0fe42060063afa3c9e5092e32c7156944a9d235 not found: ID does not exist" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.564115 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0206c382-c230-4d24-8d8e-7744bc1c1209-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"0206c382-c230-4d24-8d8e-7744bc1c1209\") " pod="openstack/nova-cell1-conductor-0" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.564189 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0206c382-c230-4d24-8d8e-7744bc1c1209-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"0206c382-c230-4d24-8d8e-7744bc1c1209\") " pod="openstack/nova-cell1-conductor-0" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.564218 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrf7d\" (UniqueName: \"kubernetes.io/projected/0206c382-c230-4d24-8d8e-7744bc1c1209-kube-api-access-lrf7d\") pod \"nova-cell1-conductor-0\" (UID: \"0206c382-c230-4d24-8d8e-7744bc1c1209\") " pod="openstack/nova-cell1-conductor-0" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.564492 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xdfgq\" (UniqueName: \"kubernetes.io/projected/61518c57-9198-4d3a-b58f-cf2a2de9813d-kube-api-access-xdfgq\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.564508 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61518c57-9198-4d3a-b58f-cf2a2de9813d-logs\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.564518 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.564528 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.576601 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "61518c57-9198-4d3a-b58f-cf2a2de9813d" (UID: "61518c57-9198-4d3a-b58f-cf2a2de9813d"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.665846 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0206c382-c230-4d24-8d8e-7744bc1c1209-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"0206c382-c230-4d24-8d8e-7744bc1c1209\") " pod="openstack/nova-cell1-conductor-0" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.665947 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0206c382-c230-4d24-8d8e-7744bc1c1209-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"0206c382-c230-4d24-8d8e-7744bc1c1209\") " pod="openstack/nova-cell1-conductor-0" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.665976 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrf7d\" (UniqueName: \"kubernetes.io/projected/0206c382-c230-4d24-8d8e-7744bc1c1209-kube-api-access-lrf7d\") pod \"nova-cell1-conductor-0\" (UID: \"0206c382-c230-4d24-8d8e-7744bc1c1209\") " pod="openstack/nova-cell1-conductor-0" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.666088 4798 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/61518c57-9198-4d3a-b58f-cf2a2de9813d-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.671935 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0206c382-c230-4d24-8d8e-7744bc1c1209-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"0206c382-c230-4d24-8d8e-7744bc1c1209\") " pod="openstack/nova-cell1-conductor-0" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.672292 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0206c382-c230-4d24-8d8e-7744bc1c1209-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"0206c382-c230-4d24-8d8e-7744bc1c1209\") " pod="openstack/nova-cell1-conductor-0" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.684454 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrf7d\" (UniqueName: \"kubernetes.io/projected/0206c382-c230-4d24-8d8e-7744bc1c1209-kube-api-access-lrf7d\") pod \"nova-cell1-conductor-0\" (UID: \"0206c382-c230-4d24-8d8e-7744bc1c1209\") " pod="openstack/nova-cell1-conductor-0" Oct 11 04:13:45 crc kubenswrapper[4798]: I1011 04:13:45.841998 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.331908 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.406938 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"0206c382-c230-4d24-8d8e-7744bc1c1209","Type":"ContainerStarted","Data":"9159891b71dcce057edfd4de9ec29b3b67fae1f409ca81450725004535da3158"} Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.409148 4798 generic.go:334] "Generic (PLEG): container finished" podID="c4ba238e-3ec7-4703-862c-a1f14abda9c9" containerID="20ab35a996cf54a1a9321e38b400af1b14c71359b63cdf0a94c3b9aa76e08dc4" exitCode=0 Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.409221 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c4ba238e-3ec7-4703-862c-a1f14abda9c9","Type":"ContainerDied","Data":"20ab35a996cf54a1a9321e38b400af1b14c71359b63cdf0a94c3b9aa76e08dc4"} Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.410826 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.474655 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.498727 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.511125 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.513009 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.516243 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.516609 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.521229 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.546366 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.594504 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.594844 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.594953 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-config-data\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.595066 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8048fd5b-4d35-45d2-8120-233af208d713-logs\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.595173 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lc6px\" (UniqueName: \"kubernetes.io/projected/8048fd5b-4d35-45d2-8120-233af208d713-kube-api-access-lc6px\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.696461 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4ba238e-3ec7-4703-862c-a1f14abda9c9-config-data\") pod \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\" (UID: \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\") " Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.699102 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4ba238e-3ec7-4703-862c-a1f14abda9c9-combined-ca-bundle\") pod \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\" (UID: \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\") " Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.699317 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jrg4\" (UniqueName: \"kubernetes.io/projected/c4ba238e-3ec7-4703-862c-a1f14abda9c9-kube-api-access-6jrg4\") pod \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\" (UID: \"c4ba238e-3ec7-4703-862c-a1f14abda9c9\") " Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.700091 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8048fd5b-4d35-45d2-8120-233af208d713-logs\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.700163 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-lc6px\" (UniqueName: \"kubernetes.io/projected/8048fd5b-4d35-45d2-8120-233af208d713-kube-api-access-lc6px\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.700539 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.700657 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.700690 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-config-data\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.700716 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8048fd5b-4d35-45d2-8120-233af208d713-logs\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.705179 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-config-data\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.705405 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.706159 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4ba238e-3ec7-4703-862c-a1f14abda9c9-kube-api-access-6jrg4" (OuterVolumeSpecName: "kube-api-access-6jrg4") pod "c4ba238e-3ec7-4703-862c-a1f14abda9c9" (UID: "c4ba238e-3ec7-4703-862c-a1f14abda9c9"). InnerVolumeSpecName "kube-api-access-6jrg4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.709197 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.717165 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lc6px\" (UniqueName: \"kubernetes.io/projected/8048fd5b-4d35-45d2-8120-233af208d713-kube-api-access-lc6px\") pod \"nova-metadata-0\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " pod="openstack/nova-metadata-0" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.736769 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4ba238e-3ec7-4703-862c-a1f14abda9c9-config-data" (OuterVolumeSpecName: "config-data") pod "c4ba238e-3ec7-4703-862c-a1f14abda9c9" (UID: "c4ba238e-3ec7-4703-862c-a1f14abda9c9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.745317 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4ba238e-3ec7-4703-862c-a1f14abda9c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c4ba238e-3ec7-4703-862c-a1f14abda9c9" (UID: "c4ba238e-3ec7-4703-862c-a1f14abda9c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.801917 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4ba238e-3ec7-4703-862c-a1f14abda9c9-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.802154 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4ba238e-3ec7-4703-862c-a1f14abda9c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.802259 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jrg4\" (UniqueName: \"kubernetes.io/projected/c4ba238e-3ec7-4703-862c-a1f14abda9c9-kube-api-access-6jrg4\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:46 crc kubenswrapper[4798]: I1011 04:13:46.869689 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 04:13:47 crc kubenswrapper[4798]: I1011 04:13:47.429454 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 04:13:47 crc kubenswrapper[4798]: I1011 04:13:47.440776 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61518c57-9198-4d3a-b58f-cf2a2de9813d" path="/var/lib/kubelet/pods/61518c57-9198-4d3a-b58f-cf2a2de9813d/volumes" Oct 11 04:13:47 crc kubenswrapper[4798]: I1011 04:13:47.441897 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Oct 11 04:13:47 crc kubenswrapper[4798]: I1011 04:13:47.441932 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"0206c382-c230-4d24-8d8e-7744bc1c1209","Type":"ContainerStarted","Data":"c0fd1fea5769ba8e101fc741ae8226303198cd632cfc738ffc110a2d0dfe7497"} Oct 11 04:13:47 crc kubenswrapper[4798]: I1011 04:13:47.441957 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:13:47 crc kubenswrapper[4798]: I1011 04:13:47.441979 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c4ba238e-3ec7-4703-862c-a1f14abda9c9","Type":"ContainerDied","Data":"3cbc1cddafb0d0adb7881b49de4a92268dea769dbd8f40f6b6084b6c0747f614"} Oct 11 04:13:47 crc kubenswrapper[4798]: I1011 04:13:47.442011 4798 scope.go:117] "RemoveContainer" containerID="20ab35a996cf54a1a9321e38b400af1b14c71359b63cdf0a94c3b9aa76e08dc4" Oct 11 04:13:47 crc kubenswrapper[4798]: I1011 04:13:47.465935 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.465902667 podStartE2EDuration="2.465902667s" podCreationTimestamp="2025-10-11 04:13:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:13:47.453077522 +0000 UTC m=+1122.789367228" watchObservedRunningTime="2025-10-11 04:13:47.465902667 +0000 UTC m=+1122.802192373" Oct 11 04:13:48 crc kubenswrapper[4798]: I1011 04:13:48.476586 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8048fd5b-4d35-45d2-8120-233af208d713","Type":"ContainerStarted","Data":"5d8e5d353fdf27cbc770ea9ddefabf016d056ecd8cc3bf5c6cdda2be41de405d"} Oct 11 04:13:48 crc kubenswrapper[4798]: I1011 04:13:48.479481 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8048fd5b-4d35-45d2-8120-233af208d713","Type":"ContainerStarted","Data":"b501c2408606b0754f13093b47a67a9b4c25676c5b419c257b70fc90a77a3384"} Oct 11 04:13:48 crc kubenswrapper[4798]: I1011 04:13:48.479536 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8048fd5b-4d35-45d2-8120-233af208d713","Type":"ContainerStarted","Data":"54d0d749caffc9b8411eb3f33f7f2eabee7e8ec713b7625090631a37253a44ab"} Oct 11 04:13:48 crc kubenswrapper[4798]: I1011 04:13:48.512375 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.512352934 podStartE2EDuration="2.512352934s" podCreationTimestamp="2025-10-11 04:13:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:13:48.509637459 +0000 UTC m=+1123.845927135" watchObservedRunningTime="2025-10-11 04:13:48.512352934 +0000 UTC m=+1123.848642620" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.501774 4798 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openstack/nova-api-0" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.504673 4798 generic.go:334] "Generic (PLEG): container finished" podID="31e86dba-00e8-44be-8039-05c5043c67e1" containerID="872af61a38b33c8ac361866d9e9ed48f138aadb7251adfa87def45ce6d66fc18" exitCode=0 Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.504856 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31e86dba-00e8-44be-8039-05c5043c67e1","Type":"ContainerDied","Data":"872af61a38b33c8ac361866d9e9ed48f138aadb7251adfa87def45ce6d66fc18"} Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.504894 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31e86dba-00e8-44be-8039-05c5043c67e1","Type":"ContainerDied","Data":"c1e847f1a6c50fff69b893875f7905b5bf7247cb7e8192ea557854ab446fdff5"} Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.504915 4798 scope.go:117] "RemoveContainer" containerID="872af61a38b33c8ac361866d9e9ed48f138aadb7251adfa87def45ce6d66fc18" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.535067 4798 scope.go:117] "RemoveContainer" containerID="19acbc77625344bd76a48bc2a83efb20a94acd91cf5b3329ccd15ce284b91824" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.564817 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31e86dba-00e8-44be-8039-05c5043c67e1-config-data\") pod \"31e86dba-00e8-44be-8039-05c5043c67e1\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.565011 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31e86dba-00e8-44be-8039-05c5043c67e1-combined-ca-bundle\") pod \"31e86dba-00e8-44be-8039-05c5043c67e1\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.565062 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31e86dba-00e8-44be-8039-05c5043c67e1-logs\") pod \"31e86dba-00e8-44be-8039-05c5043c67e1\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.565152 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4r2g\" (UniqueName: \"kubernetes.io/projected/31e86dba-00e8-44be-8039-05c5043c67e1-kube-api-access-j4r2g\") pod \"31e86dba-00e8-44be-8039-05c5043c67e1\" (UID: \"31e86dba-00e8-44be-8039-05c5043c67e1\") " Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.565820 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31e86dba-00e8-44be-8039-05c5043c67e1-logs" (OuterVolumeSpecName: "logs") pod "31e86dba-00e8-44be-8039-05c5043c67e1" (UID: "31e86dba-00e8-44be-8039-05c5043c67e1"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.568133 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31e86dba-00e8-44be-8039-05c5043c67e1-logs\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.577661 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31e86dba-00e8-44be-8039-05c5043c67e1-kube-api-access-j4r2g" (OuterVolumeSpecName: "kube-api-access-j4r2g") pod "31e86dba-00e8-44be-8039-05c5043c67e1" (UID: "31e86dba-00e8-44be-8039-05c5043c67e1"). InnerVolumeSpecName "kube-api-access-j4r2g". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.592931 4798 scope.go:117] "RemoveContainer" containerID="872af61a38b33c8ac361866d9e9ed48f138aadb7251adfa87def45ce6d66fc18" Oct 11 04:13:49 crc kubenswrapper[4798]: E1011 04:13:49.594011 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"872af61a38b33c8ac361866d9e9ed48f138aadb7251adfa87def45ce6d66fc18\": container with ID starting with 872af61a38b33c8ac361866d9e9ed48f138aadb7251adfa87def45ce6d66fc18 not found: ID does not exist" containerID="872af61a38b33c8ac361866d9e9ed48f138aadb7251adfa87def45ce6d66fc18" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.594093 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"872af61a38b33c8ac361866d9e9ed48f138aadb7251adfa87def45ce6d66fc18"} err="failed to get container status \"872af61a38b33c8ac361866d9e9ed48f138aadb7251adfa87def45ce6d66fc18\": rpc error: code = NotFound desc = could not find container \"872af61a38b33c8ac361866d9e9ed48f138aadb7251adfa87def45ce6d66fc18\": container with ID starting with 872af61a38b33c8ac361866d9e9ed48f138aadb7251adfa87def45ce6d66fc18 not found: ID does not exist" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.594120 4798 scope.go:117] "RemoveContainer" containerID="19acbc77625344bd76a48bc2a83efb20a94acd91cf5b3329ccd15ce284b91824" Oct 11 04:13:49 crc kubenswrapper[4798]: E1011 04:13:49.596287 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19acbc77625344bd76a48bc2a83efb20a94acd91cf5b3329ccd15ce284b91824\": container with ID starting with 19acbc77625344bd76a48bc2a83efb20a94acd91cf5b3329ccd15ce284b91824 not found: ID does not exist" containerID="19acbc77625344bd76a48bc2a83efb20a94acd91cf5b3329ccd15ce284b91824" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.596320 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19acbc77625344bd76a48bc2a83efb20a94acd91cf5b3329ccd15ce284b91824"} err="failed to get container status \"19acbc77625344bd76a48bc2a83efb20a94acd91cf5b3329ccd15ce284b91824\": rpc error: code = NotFound desc = could not find container \"19acbc77625344bd76a48bc2a83efb20a94acd91cf5b3329ccd15ce284b91824\": container with ID starting with 19acbc77625344bd76a48bc2a83efb20a94acd91cf5b3329ccd15ce284b91824 not found: ID does not exist" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.596301 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31e86dba-00e8-44be-8039-05c5043c67e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "31e86dba-00e8-44be-8039-05c5043c67e1" (UID: 
"31e86dba-00e8-44be-8039-05c5043c67e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.606758 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31e86dba-00e8-44be-8039-05c5043c67e1-config-data" (OuterVolumeSpecName: "config-data") pod "31e86dba-00e8-44be-8039-05c5043c67e1" (UID: "31e86dba-00e8-44be-8039-05c5043c67e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.670098 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31e86dba-00e8-44be-8039-05c5043c67e1-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.670149 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31e86dba-00e8-44be-8039-05c5043c67e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:49 crc kubenswrapper[4798]: I1011 04:13:49.670162 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4r2g\" (UniqueName: \"kubernetes.io/projected/31e86dba-00e8-44be-8039-05c5043c67e1-kube-api-access-j4r2g\") on node \"crc\" DevicePath \"\"" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.515237 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.574462 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.640435 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.651481 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 11 04:13:50 crc kubenswrapper[4798]: E1011 04:13:50.651937 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31e86dba-00e8-44be-8039-05c5043c67e1" containerName="nova-api-api" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.651960 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="31e86dba-00e8-44be-8039-05c5043c67e1" containerName="nova-api-api" Oct 11 04:13:50 crc kubenswrapper[4798]: E1011 04:13:50.651972 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4ba238e-3ec7-4703-862c-a1f14abda9c9" containerName="nova-scheduler-scheduler" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.651979 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4ba238e-3ec7-4703-862c-a1f14abda9c9" containerName="nova-scheduler-scheduler" Oct 11 04:13:50 crc kubenswrapper[4798]: E1011 04:13:50.652013 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31e86dba-00e8-44be-8039-05c5043c67e1" containerName="nova-api-log" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.652019 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="31e86dba-00e8-44be-8039-05c5043c67e1" containerName="nova-api-log" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.652183 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="31e86dba-00e8-44be-8039-05c5043c67e1" containerName="nova-api-log" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.652204 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="31e86dba-00e8-44be-8039-05c5043c67e1" 
containerName="nova-api-api" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.652215 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4ba238e-3ec7-4703-862c-a1f14abda9c9" containerName="nova-scheduler-scheduler" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.653373 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.658621 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.659138 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.810760 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6590a18a-25c1-4edd-beef-82369b00055a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") " pod="openstack/nova-api-0" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.810872 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6590a18a-25c1-4edd-beef-82369b00055a-config-data\") pod \"nova-api-0\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") " pod="openstack/nova-api-0" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.810928 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46hjb\" (UniqueName: \"kubernetes.io/projected/6590a18a-25c1-4edd-beef-82369b00055a-kube-api-access-46hjb\") pod \"nova-api-0\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") " pod="openstack/nova-api-0" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.811172 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6590a18a-25c1-4edd-beef-82369b00055a-logs\") pod \"nova-api-0\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") " pod="openstack/nova-api-0" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.912720 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46hjb\" (UniqueName: \"kubernetes.io/projected/6590a18a-25c1-4edd-beef-82369b00055a-kube-api-access-46hjb\") pod \"nova-api-0\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") " pod="openstack/nova-api-0" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.912860 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6590a18a-25c1-4edd-beef-82369b00055a-logs\") pod \"nova-api-0\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") " pod="openstack/nova-api-0" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.912907 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6590a18a-25c1-4edd-beef-82369b00055a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") " pod="openstack/nova-api-0" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.912966 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6590a18a-25c1-4edd-beef-82369b00055a-config-data\") pod \"nova-api-0\" (UID: 
\"6590a18a-25c1-4edd-beef-82369b00055a\") " pod="openstack/nova-api-0" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.913604 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6590a18a-25c1-4edd-beef-82369b00055a-logs\") pod \"nova-api-0\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") " pod="openstack/nova-api-0" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.917014 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6590a18a-25c1-4edd-beef-82369b00055a-config-data\") pod \"nova-api-0\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") " pod="openstack/nova-api-0" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.918232 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6590a18a-25c1-4edd-beef-82369b00055a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") " pod="openstack/nova-api-0" Oct 11 04:13:50 crc kubenswrapper[4798]: I1011 04:13:50.930044 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46hjb\" (UniqueName: \"kubernetes.io/projected/6590a18a-25c1-4edd-beef-82369b00055a-kube-api-access-46hjb\") pod \"nova-api-0\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") " pod="openstack/nova-api-0" Oct 11 04:13:51 crc kubenswrapper[4798]: I1011 04:13:51.030083 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 11 04:13:51 crc kubenswrapper[4798]: I1011 04:13:51.437281 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31e86dba-00e8-44be-8039-05c5043c67e1" path="/var/lib/kubelet/pods/31e86dba-00e8-44be-8039-05c5043c67e1/volumes" Oct 11 04:13:51 crc kubenswrapper[4798]: I1011 04:13:51.515939 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 11 04:13:51 crc kubenswrapper[4798]: I1011 04:13:51.869787 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 11 04:13:51 crc kubenswrapper[4798]: I1011 04:13:51.870510 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 11 04:13:52 crc kubenswrapper[4798]: I1011 04:13:52.534428 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6590a18a-25c1-4edd-beef-82369b00055a","Type":"ContainerStarted","Data":"6cac8e54c7dc1456964bfd2358fb690a0e053512c28cef9a12317cb68cc3d4c7"} Oct 11 04:13:52 crc kubenswrapper[4798]: I1011 04:13:52.534817 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6590a18a-25c1-4edd-beef-82369b00055a","Type":"ContainerStarted","Data":"b43be117c77833821d2dc38230f429404289bfa0b30f83b1ed13441b031d8b23"} Oct 11 04:13:52 crc kubenswrapper[4798]: I1011 04:13:52.534828 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6590a18a-25c1-4edd-beef-82369b00055a","Type":"ContainerStarted","Data":"d92fa79e052ae646acfb143ef9f0b7200c085fefe445bfe7e10ff324b5583c71"} Oct 11 04:13:52 crc kubenswrapper[4798]: I1011 04:13:52.559735 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.559715499 podStartE2EDuration="2.559715499s" podCreationTimestamp="2025-10-11 04:13:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 
UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:13:52.551844382 +0000 UTC m=+1127.888134078" watchObservedRunningTime="2025-10-11 04:13:52.559715499 +0000 UTC m=+1127.896005185" Oct 11 04:13:55 crc kubenswrapper[4798]: I1011 04:13:55.869124 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Oct 11 04:13:56 crc kubenswrapper[4798]: I1011 04:13:56.870332 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 11 04:13:56 crc kubenswrapper[4798]: I1011 04:13:56.870748 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 11 04:13:57 crc kubenswrapper[4798]: I1011 04:13:57.138957 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:13:57 crc kubenswrapper[4798]: I1011 04:13:57.139033 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:13:57 crc kubenswrapper[4798]: I1011 04:13:57.139091 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 04:13:57 crc kubenswrapper[4798]: I1011 04:13:57.140026 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d3a7cc4feeb865c985e6b8c7245a1d16577ae082c25541425e42b8ef5e8c15f6"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 04:13:57 crc kubenswrapper[4798]: I1011 04:13:57.140093 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://d3a7cc4feeb865c985e6b8c7245a1d16577ae082c25541425e42b8ef5e8c15f6" gracePeriod=600 Oct 11 04:13:57 crc kubenswrapper[4798]: I1011 04:13:57.628807 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerID="d3a7cc4feeb865c985e6b8c7245a1d16577ae082c25541425e42b8ef5e8c15f6" exitCode=0 Oct 11 04:13:57 crc kubenswrapper[4798]: I1011 04:13:57.629035 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"d3a7cc4feeb865c985e6b8c7245a1d16577ae082c25541425e42b8ef5e8c15f6"} Oct 11 04:13:57 crc kubenswrapper[4798]: I1011 04:13:57.629327 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"0ede2098e77cd102846ac8838309a8f0eccb5d93df251b4af8b848dec6f5e092"} Oct 11 04:13:57 crc kubenswrapper[4798]: I1011 04:13:57.629358 4798 scope.go:117] "RemoveContainer" 
containerID="54c6a0004bcf04af0eb4f7beb3728e8dd11d3c9b035976cf54104ce60f0d2629" Oct 11 04:13:57 crc kubenswrapper[4798]: I1011 04:13:57.890594 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8048fd5b-4d35-45d2-8120-233af208d713" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.178:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 11 04:13:57 crc kubenswrapper[4798]: I1011 04:13:57.890594 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8048fd5b-4d35-45d2-8120-233af208d713" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.178:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 11 04:14:01 crc kubenswrapper[4798]: I1011 04:14:01.030789 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 11 04:14:01 crc kubenswrapper[4798]: I1011 04:14:01.031254 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 11 04:14:02 crc kubenswrapper[4798]: I1011 04:14:02.113575 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6590a18a-25c1-4edd-beef-82369b00055a" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.179:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 11 04:14:02 crc kubenswrapper[4798]: I1011 04:14:02.113575 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6590a18a-25c1-4edd-beef-82369b00055a" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.179:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 11 04:14:06 crc kubenswrapper[4798]: I1011 04:14:06.877552 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 11 04:14:06 crc kubenswrapper[4798]: I1011 04:14:06.879745 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 11 04:14:06 crc kubenswrapper[4798]: I1011 04:14:06.883820 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 11 04:14:07 crc kubenswrapper[4798]: I1011 04:14:07.734675 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.687073 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.749085 4798 generic.go:334] "Generic (PLEG): container finished" podID="658d57c2-d353-435e-9715-abc5c2a66d1b" containerID="540726f5d6dd87652fc5999dc34e04212fe67ea741fd00ebf0ce7d461f02dd3b" exitCode=137 Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.749228 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.749297 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"658d57c2-d353-435e-9715-abc5c2a66d1b","Type":"ContainerDied","Data":"540726f5d6dd87652fc5999dc34e04212fe67ea741fd00ebf0ce7d461f02dd3b"} Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.749356 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"658d57c2-d353-435e-9715-abc5c2a66d1b","Type":"ContainerDied","Data":"9f87fb84517092d246016bfc37a989be01223117aab7de7aa0023b00738884c0"} Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.749385 4798 scope.go:117] "RemoveContainer" containerID="540726f5d6dd87652fc5999dc34e04212fe67ea741fd00ebf0ce7d461f02dd3b" Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.777280 4798 scope.go:117] "RemoveContainer" containerID="540726f5d6dd87652fc5999dc34e04212fe67ea741fd00ebf0ce7d461f02dd3b" Oct 11 04:14:09 crc kubenswrapper[4798]: E1011 04:14:09.777974 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"540726f5d6dd87652fc5999dc34e04212fe67ea741fd00ebf0ce7d461f02dd3b\": container with ID starting with 540726f5d6dd87652fc5999dc34e04212fe67ea741fd00ebf0ce7d461f02dd3b not found: ID does not exist" containerID="540726f5d6dd87652fc5999dc34e04212fe67ea741fd00ebf0ce7d461f02dd3b" Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.778084 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"540726f5d6dd87652fc5999dc34e04212fe67ea741fd00ebf0ce7d461f02dd3b"} err="failed to get container status \"540726f5d6dd87652fc5999dc34e04212fe67ea741fd00ebf0ce7d461f02dd3b\": rpc error: code = NotFound desc = could not find container \"540726f5d6dd87652fc5999dc34e04212fe67ea741fd00ebf0ce7d461f02dd3b\": container with ID starting with 540726f5d6dd87652fc5999dc34e04212fe67ea741fd00ebf0ce7d461f02dd3b not found: ID does not exist" Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.786982 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/658d57c2-d353-435e-9715-abc5c2a66d1b-config-data\") pod \"658d57c2-d353-435e-9715-abc5c2a66d1b\" (UID: \"658d57c2-d353-435e-9715-abc5c2a66d1b\") " Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.787218 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54pxx\" (UniqueName: \"kubernetes.io/projected/658d57c2-d353-435e-9715-abc5c2a66d1b-kube-api-access-54pxx\") pod \"658d57c2-d353-435e-9715-abc5c2a66d1b\" (UID: \"658d57c2-d353-435e-9715-abc5c2a66d1b\") " Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.787309 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/658d57c2-d353-435e-9715-abc5c2a66d1b-combined-ca-bundle\") pod \"658d57c2-d353-435e-9715-abc5c2a66d1b\" (UID: \"658d57c2-d353-435e-9715-abc5c2a66d1b\") " Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.797606 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/658d57c2-d353-435e-9715-abc5c2a66d1b-kube-api-access-54pxx" (OuterVolumeSpecName: "kube-api-access-54pxx") pod "658d57c2-d353-435e-9715-abc5c2a66d1b" (UID: "658d57c2-d353-435e-9715-abc5c2a66d1b"). 
InnerVolumeSpecName "kube-api-access-54pxx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.819168 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/658d57c2-d353-435e-9715-abc5c2a66d1b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "658d57c2-d353-435e-9715-abc5c2a66d1b" (UID: "658d57c2-d353-435e-9715-abc5c2a66d1b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.829826 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/658d57c2-d353-435e-9715-abc5c2a66d1b-config-data" (OuterVolumeSpecName: "config-data") pod "658d57c2-d353-435e-9715-abc5c2a66d1b" (UID: "658d57c2-d353-435e-9715-abc5c2a66d1b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.890929 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/658d57c2-d353-435e-9715-abc5c2a66d1b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.890970 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/658d57c2-d353-435e-9715-abc5c2a66d1b-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:09 crc kubenswrapper[4798]: I1011 04:14:09.890987 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54pxx\" (UniqueName: \"kubernetes.io/projected/658d57c2-d353-435e-9715-abc5c2a66d1b-kube-api-access-54pxx\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.083198 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.093766 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.111812 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 11 04:14:10 crc kubenswrapper[4798]: E1011 04:14:10.112602 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="658d57c2-d353-435e-9715-abc5c2a66d1b" containerName="nova-cell1-novncproxy-novncproxy" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.112692 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="658d57c2-d353-435e-9715-abc5c2a66d1b" containerName="nova-cell1-novncproxy-novncproxy" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.112959 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="658d57c2-d353-435e-9715-abc5c2a66d1b" containerName="nova-cell1-novncproxy-novncproxy" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.113771 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.117885 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.118707 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.118979 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.146534 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.196572 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wq6sl\" (UniqueName: \"kubernetes.io/projected/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-kube-api-access-wq6sl\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.196627 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.196688 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.196726 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.196746 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.298093 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wq6sl\" (UniqueName: \"kubernetes.io/projected/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-kube-api-access-wq6sl\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.298386 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " 
pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.298619 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.299035 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.299129 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.304884 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.305235 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.305889 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.308752 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.319670 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wq6sl\" (UniqueName: \"kubernetes.io/projected/ba9b4b7f-46a9-4e6e-9c35-4a114df18a64-kube-api-access-wq6sl\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.431865 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:10 crc kubenswrapper[4798]: I1011 04:14:10.922791 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.035456 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.035966 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.036356 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.036681 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.039333 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.040529 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.254677 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-k76dd"] Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.256356 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.279357 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-k76dd"] Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.322624 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-ovsdbserver-nb\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.322684 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-ovsdbserver-sb\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.322770 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhsmv\" (UniqueName: \"kubernetes.io/projected/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-kube-api-access-jhsmv\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.323028 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-config\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.323123 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-dns-svc\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.424622 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-ovsdbserver-nb\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.424917 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-ovsdbserver-sb\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.424977 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhsmv\" (UniqueName: \"kubernetes.io/projected/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-kube-api-access-jhsmv\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.425045 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-config\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.425073 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-dns-svc\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.425985 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-ovsdbserver-sb\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.425991 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-ovsdbserver-nb\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.426686 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-config\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.426801 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-dns-svc\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: 
\"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.435634 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="658d57c2-d353-435e-9715-abc5c2a66d1b" path="/var/lib/kubelet/pods/658d57c2-d353-435e-9715-abc5c2a66d1b/volumes" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.450511 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhsmv\" (UniqueName: \"kubernetes.io/projected/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-kube-api-access-jhsmv\") pod \"dnsmasq-dns-68d4b6d797-k76dd\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.594440 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.785005 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64","Type":"ContainerStarted","Data":"c414596491cbd6e24f17ba9c5e33c7028fbda9ba364f20c1e86615e7b891dac9"} Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.785210 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ba9b4b7f-46a9-4e6e-9c35-4a114df18a64","Type":"ContainerStarted","Data":"34fab1081991fd2bf6f7ac57c0fa6fcb4cb928af93b8a4619c10f8fafac7a2a5"} Oct 11 04:14:11 crc kubenswrapper[4798]: I1011 04:14:11.819245 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=1.819220453 podStartE2EDuration="1.819220453s" podCreationTimestamp="2025-10-11 04:14:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:14:11.805321052 +0000 UTC m=+1147.141610738" watchObservedRunningTime="2025-10-11 04:14:11.819220453 +0000 UTC m=+1147.155510139" Oct 11 04:14:12 crc kubenswrapper[4798]: I1011 04:14:12.104717 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-k76dd"] Oct 11 04:14:12 crc kubenswrapper[4798]: I1011 04:14:12.794779 4798 generic.go:334] "Generic (PLEG): container finished" podID="fa13805f-6f19-41b8-a0a7-5a18e9e1c908" containerID="549bafb0ea114b72205d890e7d87a7659ad5fa639404a8020adb7a19cd7b417b" exitCode=0 Oct 11 04:14:12 crc kubenswrapper[4798]: I1011 04:14:12.794904 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" event={"ID":"fa13805f-6f19-41b8-a0a7-5a18e9e1c908","Type":"ContainerDied","Data":"549bafb0ea114b72205d890e7d87a7659ad5fa639404a8020adb7a19cd7b417b"} Oct 11 04:14:12 crc kubenswrapper[4798]: I1011 04:14:12.795188 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" event={"ID":"fa13805f-6f19-41b8-a0a7-5a18e9e1c908","Type":"ContainerStarted","Data":"ccaa8d1d17851c00fd09e9b592430bf1fd26a57b4170d0200d46951a61c9299e"} Oct 11 04:14:13 crc kubenswrapper[4798]: I1011 04:14:13.813502 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" event={"ID":"fa13805f-6f19-41b8-a0a7-5a18e9e1c908","Type":"ContainerStarted","Data":"1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e"} Oct 11 04:14:13 crc kubenswrapper[4798]: I1011 04:14:13.814369 4798 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:14:13 crc kubenswrapper[4798]: I1011 04:14:13.861208 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:14:13 crc kubenswrapper[4798]: I1011 04:14:13.862014 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="ceilometer-central-agent" containerID="cri-o://05a92666dffc19e59f07cda0613c586e8e42483e7d638360864f45f8c20b70d7" gracePeriod=30 Oct 11 04:14:13 crc kubenswrapper[4798]: I1011 04:14:13.862867 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="proxy-httpd" containerID="cri-o://64724848ea57443261bfed5ee339b35f38906fbc635050e9786f2e97714eedb7" gracePeriod=30 Oct 11 04:14:13 crc kubenswrapper[4798]: I1011 04:14:13.862979 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="sg-core" containerID="cri-o://61f1aeb503d37741b7ebb14ead5e6ee4ea5b1a0593d9edafa9fb78f36e468b4b" gracePeriod=30 Oct 11 04:14:13 crc kubenswrapper[4798]: I1011 04:14:13.863145 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="ceilometer-notification-agent" containerID="cri-o://1a418281aa55263c9d1b564e523ae852ba08bf62b520dd2f7bb70c1feff69d0e" gracePeriod=30 Oct 11 04:14:13 crc kubenswrapper[4798]: I1011 04:14:13.875984 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" podStartSLOduration=2.8759524340000002 podStartE2EDuration="2.875952434s" podCreationTimestamp="2025-10-11 04:14:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:14:13.849010462 +0000 UTC m=+1149.185300148" watchObservedRunningTime="2025-10-11 04:14:13.875952434 +0000 UTC m=+1149.212242120" Oct 11 04:14:14 crc kubenswrapper[4798]: I1011 04:14:14.461528 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 11 04:14:14 crc kubenswrapper[4798]: I1011 04:14:14.462637 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="6590a18a-25c1-4edd-beef-82369b00055a" containerName="nova-api-log" containerID="cri-o://b43be117c77833821d2dc38230f429404289bfa0b30f83b1ed13441b031d8b23" gracePeriod=30 Oct 11 04:14:14 crc kubenswrapper[4798]: I1011 04:14:14.462986 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="6590a18a-25c1-4edd-beef-82369b00055a" containerName="nova-api-api" containerID="cri-o://6cac8e54c7dc1456964bfd2358fb690a0e053512c28cef9a12317cb68cc3d4c7" gracePeriod=30 Oct 11 04:14:14 crc kubenswrapper[4798]: E1011 04:14:14.736183 4798 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6590a18a_25c1_4edd_beef_82369b00055a.slice/crio-conmon-b43be117c77833821d2dc38230f429404289bfa0b30f83b1ed13441b031d8b23.scope\": RecentStats: unable to find data in memory cache]" Oct 11 04:14:14 crc kubenswrapper[4798]: I1011 04:14:14.826844 4798 generic.go:334] 
"Generic (PLEG): container finished" podID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerID="64724848ea57443261bfed5ee339b35f38906fbc635050e9786f2e97714eedb7" exitCode=0 Oct 11 04:14:14 crc kubenswrapper[4798]: I1011 04:14:14.826902 4798 generic.go:334] "Generic (PLEG): container finished" podID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerID="61f1aeb503d37741b7ebb14ead5e6ee4ea5b1a0593d9edafa9fb78f36e468b4b" exitCode=2 Oct 11 04:14:14 crc kubenswrapper[4798]: I1011 04:14:14.826916 4798 generic.go:334] "Generic (PLEG): container finished" podID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerID="05a92666dffc19e59f07cda0613c586e8e42483e7d638360864f45f8c20b70d7" exitCode=0 Oct 11 04:14:14 crc kubenswrapper[4798]: I1011 04:14:14.826923 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3","Type":"ContainerDied","Data":"64724848ea57443261bfed5ee339b35f38906fbc635050e9786f2e97714eedb7"} Oct 11 04:14:14 crc kubenswrapper[4798]: I1011 04:14:14.826978 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3","Type":"ContainerDied","Data":"61f1aeb503d37741b7ebb14ead5e6ee4ea5b1a0593d9edafa9fb78f36e468b4b"} Oct 11 04:14:14 crc kubenswrapper[4798]: I1011 04:14:14.826995 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3","Type":"ContainerDied","Data":"05a92666dffc19e59f07cda0613c586e8e42483e7d638360864f45f8c20b70d7"} Oct 11 04:14:14 crc kubenswrapper[4798]: I1011 04:14:14.829430 4798 generic.go:334] "Generic (PLEG): container finished" podID="6590a18a-25c1-4edd-beef-82369b00055a" containerID="b43be117c77833821d2dc38230f429404289bfa0b30f83b1ed13441b031d8b23" exitCode=143 Oct 11 04:14:14 crc kubenswrapper[4798]: I1011 04:14:14.829503 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6590a18a-25c1-4edd-beef-82369b00055a","Type":"ContainerDied","Data":"b43be117c77833821d2dc38230f429404289bfa0b30f83b1ed13441b031d8b23"} Oct 11 04:14:15 crc kubenswrapper[4798]: I1011 04:14:15.435244 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.406658 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.544643 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-run-httpd\") pod \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.545175 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" (UID: "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.545293 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-config-data\") pod \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.545330 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmw7w\" (UniqueName: \"kubernetes.io/projected/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-kube-api-access-lmw7w\") pod \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.545435 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-scripts\") pod \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.545474 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-combined-ca-bundle\") pod \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.545633 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-log-httpd\") pod \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.545671 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-ceilometer-tls-certs\") pod \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.545732 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-sg-core-conf-yaml\") pod \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\" (UID: \"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3\") " Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.546710 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" (UID: "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.547179 4798 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.547197 4798 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.554354 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-kube-api-access-lmw7w" (OuterVolumeSpecName: "kube-api-access-lmw7w") pod "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" (UID: "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3"). InnerVolumeSpecName "kube-api-access-lmw7w". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.556935 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-scripts" (OuterVolumeSpecName: "scripts") pod "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" (UID: "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.579211 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" (UID: "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.601183 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" (UID: "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.640248 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" (UID: "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.651231 4798 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.651269 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmw7w\" (UniqueName: \"kubernetes.io/projected/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-kube-api-access-lmw7w\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.651286 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.651299 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.651335 4798 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.681171 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-config-data" (OuterVolumeSpecName: "config-data") pod "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" (UID: "e3e55d17-f35e-4c87-b08a-8f6d8800ebf3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.754292 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.871192 4798 generic.go:334] "Generic (PLEG): container finished" podID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerID="1a418281aa55263c9d1b564e523ae852ba08bf62b520dd2f7bb70c1feff69d0e" exitCode=0 Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.871267 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3","Type":"ContainerDied","Data":"1a418281aa55263c9d1b564e523ae852ba08bf62b520dd2f7bb70c1feff69d0e"} Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.871657 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3e55d17-f35e-4c87-b08a-8f6d8800ebf3","Type":"ContainerDied","Data":"6aea42b4e5522cd3b344f71a3dac070b7d03945103dafc992c824ba84468681d"} Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.871302 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.871730 4798 scope.go:117] "RemoveContainer" containerID="64724848ea57443261bfed5ee339b35f38906fbc635050e9786f2e97714eedb7" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.951850 4798 scope.go:117] "RemoveContainer" containerID="61f1aeb503d37741b7ebb14ead5e6ee4ea5b1a0593d9edafa9fb78f36e468b4b" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.961624 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.979691 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.985083 4798 scope.go:117] "RemoveContainer" containerID="1a418281aa55263c9d1b564e523ae852ba08bf62b520dd2f7bb70c1feff69d0e" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.989436 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:14:16 crc kubenswrapper[4798]: E1011 04:14:16.990045 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="ceilometer-notification-agent" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.990091 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="ceilometer-notification-agent" Oct 11 04:14:16 crc kubenswrapper[4798]: E1011 04:14:16.990105 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="proxy-httpd" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.990113 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="proxy-httpd" Oct 11 04:14:16 crc kubenswrapper[4798]: E1011 04:14:16.990131 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="sg-core" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.990157 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="sg-core" Oct 11 04:14:16 crc kubenswrapper[4798]: E1011 04:14:16.990197 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="ceilometer-central-agent" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.990205 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="ceilometer-central-agent" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.990572 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="proxy-httpd" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.990611 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="sg-core" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.990644 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="ceilometer-notification-agent" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.990659 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" containerName="ceilometer-central-agent" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.992797 4798 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.994944 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.995248 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 11 04:14:16 crc kubenswrapper[4798]: I1011 04:14:16.997008 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.002325 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.008135 4798 scope.go:117] "RemoveContainer" containerID="05a92666dffc19e59f07cda0613c586e8e42483e7d638360864f45f8c20b70d7" Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.034748 4798 scope.go:117] "RemoveContainer" containerID="64724848ea57443261bfed5ee339b35f38906fbc635050e9786f2e97714eedb7" Oct 11 04:14:17 crc kubenswrapper[4798]: E1011 04:14:17.035256 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64724848ea57443261bfed5ee339b35f38906fbc635050e9786f2e97714eedb7\": container with ID starting with 64724848ea57443261bfed5ee339b35f38906fbc635050e9786f2e97714eedb7 not found: ID does not exist" containerID="64724848ea57443261bfed5ee339b35f38906fbc635050e9786f2e97714eedb7" Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.035296 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64724848ea57443261bfed5ee339b35f38906fbc635050e9786f2e97714eedb7"} err="failed to get container status \"64724848ea57443261bfed5ee339b35f38906fbc635050e9786f2e97714eedb7\": rpc error: code = NotFound desc = could not find container \"64724848ea57443261bfed5ee339b35f38906fbc635050e9786f2e97714eedb7\": container with ID starting with 64724848ea57443261bfed5ee339b35f38906fbc635050e9786f2e97714eedb7 not found: ID does not exist" Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.035319 4798 scope.go:117] "RemoveContainer" containerID="61f1aeb503d37741b7ebb14ead5e6ee4ea5b1a0593d9edafa9fb78f36e468b4b" Oct 11 04:14:17 crc kubenswrapper[4798]: E1011 04:14:17.035886 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61f1aeb503d37741b7ebb14ead5e6ee4ea5b1a0593d9edafa9fb78f36e468b4b\": container with ID starting with 61f1aeb503d37741b7ebb14ead5e6ee4ea5b1a0593d9edafa9fb78f36e468b4b not found: ID does not exist" containerID="61f1aeb503d37741b7ebb14ead5e6ee4ea5b1a0593d9edafa9fb78f36e468b4b" Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.036071 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61f1aeb503d37741b7ebb14ead5e6ee4ea5b1a0593d9edafa9fb78f36e468b4b"} err="failed to get container status \"61f1aeb503d37741b7ebb14ead5e6ee4ea5b1a0593d9edafa9fb78f36e468b4b\": rpc error: code = NotFound desc = could not find container \"61f1aeb503d37741b7ebb14ead5e6ee4ea5b1a0593d9edafa9fb78f36e468b4b\": container with ID starting with 61f1aeb503d37741b7ebb14ead5e6ee4ea5b1a0593d9edafa9fb78f36e468b4b not found: ID does not exist" Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.036178 4798 scope.go:117] "RemoveContainer" 
containerID="1a418281aa55263c9d1b564e523ae852ba08bf62b520dd2f7bb70c1feff69d0e" Oct 11 04:14:17 crc kubenswrapper[4798]: E1011 04:14:17.037217 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a418281aa55263c9d1b564e523ae852ba08bf62b520dd2f7bb70c1feff69d0e\": container with ID starting with 1a418281aa55263c9d1b564e523ae852ba08bf62b520dd2f7bb70c1feff69d0e not found: ID does not exist" containerID="1a418281aa55263c9d1b564e523ae852ba08bf62b520dd2f7bb70c1feff69d0e" Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.037249 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a418281aa55263c9d1b564e523ae852ba08bf62b520dd2f7bb70c1feff69d0e"} err="failed to get container status \"1a418281aa55263c9d1b564e523ae852ba08bf62b520dd2f7bb70c1feff69d0e\": rpc error: code = NotFound desc = could not find container \"1a418281aa55263c9d1b564e523ae852ba08bf62b520dd2f7bb70c1feff69d0e\": container with ID starting with 1a418281aa55263c9d1b564e523ae852ba08bf62b520dd2f7bb70c1feff69d0e not found: ID does not exist" Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.037265 4798 scope.go:117] "RemoveContainer" containerID="05a92666dffc19e59f07cda0613c586e8e42483e7d638360864f45f8c20b70d7" Oct 11 04:14:17 crc kubenswrapper[4798]: E1011 04:14:17.037830 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05a92666dffc19e59f07cda0613c586e8e42483e7d638360864f45f8c20b70d7\": container with ID starting with 05a92666dffc19e59f07cda0613c586e8e42483e7d638360864f45f8c20b70d7 not found: ID does not exist" containerID="05a92666dffc19e59f07cda0613c586e8e42483e7d638360864f45f8c20b70d7" Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.037847 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05a92666dffc19e59f07cda0613c586e8e42483e7d638360864f45f8c20b70d7"} err="failed to get container status \"05a92666dffc19e59f07cda0613c586e8e42483e7d638360864f45f8c20b70d7\": rpc error: code = NotFound desc = could not find container \"05a92666dffc19e59f07cda0613c586e8e42483e7d638360864f45f8c20b70d7\": container with ID starting with 05a92666dffc19e59f07cda0613c586e8e42483e7d638360864f45f8c20b70d7 not found: ID does not exist" Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.163964 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nv8hd\" (UniqueName: \"kubernetes.io/projected/679fd723-9ff7-4d15-b769-3d709ed1f9ab-kube-api-access-nv8hd\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0" Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.164019 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-config-data\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0" Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.164044 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/679fd723-9ff7-4d15-b769-3d709ed1f9ab-run-httpd\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0" Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 
04:14:17.164058 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/679fd723-9ff7-4d15-b769-3d709ed1f9ab-log-httpd\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.164100 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.164125 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.164179 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-scripts\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.164280 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.265959 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nv8hd\" (UniqueName: \"kubernetes.io/projected/679fd723-9ff7-4d15-b769-3d709ed1f9ab-kube-api-access-nv8hd\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.266024 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-config-data\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.266044 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/679fd723-9ff7-4d15-b769-3d709ed1f9ab-log-httpd\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.266062 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/679fd723-9ff7-4d15-b769-3d709ed1f9ab-run-httpd\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.266104 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.266135 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.266168 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-scripts\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.266227 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.266956 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/679fd723-9ff7-4d15-b769-3d709ed1f9ab-log-httpd\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.267373 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/679fd723-9ff7-4d15-b769-3d709ed1f9ab-run-httpd\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.271881 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-scripts\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.272013 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.273035 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.274320 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.277108 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-config-data\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.285805 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nv8hd\" (UniqueName: \"kubernetes.io/projected/679fd723-9ff7-4d15-b769-3d709ed1f9ab-kube-api-access-nv8hd\") pod \"ceilometer-0\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.310114 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.444560 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3e55d17-f35e-4c87-b08a-8f6d8800ebf3" path="/var/lib/kubelet/pods/e3e55d17-f35e-4c87-b08a-8f6d8800ebf3/volumes"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.468687 4798 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podc4ba238e-3ec7-4703-862c-a1f14abda9c9"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podc4ba238e-3ec7-4703-862c-a1f14abda9c9] : Timed out while waiting for systemd to remove kubepods-besteffort-podc4ba238e_3ec7_4703_862c_a1f14abda9c9.slice"
Oct 11 04:14:17 crc kubenswrapper[4798]: E1011 04:14:17.468742 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort podc4ba238e-3ec7-4703-862c-a1f14abda9c9] : unable to destroy cgroup paths for cgroup [kubepods besteffort podc4ba238e-3ec7-4703-862c-a1f14abda9c9] : Timed out while waiting for systemd to remove kubepods-besteffort-podc4ba238e_3ec7_4703_862c_a1f14abda9c9.slice" pod="openstack/nova-scheduler-0" podUID="c4ba238e-3ec7-4703-862c-a1f14abda9c9"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.803098 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.898530 4798 generic.go:334] "Generic (PLEG): container finished" podID="6590a18a-25c1-4edd-beef-82369b00055a" containerID="6cac8e54c7dc1456964bfd2358fb690a0e053512c28cef9a12317cb68cc3d4c7" exitCode=0
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.898621 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6590a18a-25c1-4edd-beef-82369b00055a","Type":"ContainerDied","Data":"6cac8e54c7dc1456964bfd2358fb690a0e053512c28cef9a12317cb68cc3d4c7"}
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.903587 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.903665 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"679fd723-9ff7-4d15-b769-3d709ed1f9ab","Type":"ContainerStarted","Data":"5c79018e2ed96f374bf95f105d41860ae991ef1e79919403414d99795370b586"}
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.936606 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.946301 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.949930 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.965537 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Oct 11 04:14:17 crc kubenswrapper[4798]: E1011 04:14:17.966106 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6590a18a-25c1-4edd-beef-82369b00055a" containerName="nova-api-api"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.966121 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="6590a18a-25c1-4edd-beef-82369b00055a" containerName="nova-api-api"
Oct 11 04:14:17 crc kubenswrapper[4798]: E1011 04:14:17.966144 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6590a18a-25c1-4edd-beef-82369b00055a" containerName="nova-api-log"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.966150 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="6590a18a-25c1-4edd-beef-82369b00055a" containerName="nova-api-log"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.966365 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="6590a18a-25c1-4edd-beef-82369b00055a" containerName="nova-api-log"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.966384 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="6590a18a-25c1-4edd-beef-82369b00055a" containerName="nova-api-api"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.967107 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Oct 11 04:14:17 crc kubenswrapper[4798]: I1011 04:14:17.970086 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.022695 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.082453 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46hjb\" (UniqueName: \"kubernetes.io/projected/6590a18a-25c1-4edd-beef-82369b00055a-kube-api-access-46hjb\") pod \"6590a18a-25c1-4edd-beef-82369b00055a\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") "
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.082520 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6590a18a-25c1-4edd-beef-82369b00055a-logs\") pod \"6590a18a-25c1-4edd-beef-82369b00055a\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") "
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.082754 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6590a18a-25c1-4edd-beef-82369b00055a-config-data\") pod \"6590a18a-25c1-4edd-beef-82369b00055a\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") "
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.082821 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6590a18a-25c1-4edd-beef-82369b00055a-combined-ca-bundle\") pod \"6590a18a-25c1-4edd-beef-82369b00055a\" (UID: \"6590a18a-25c1-4edd-beef-82369b00055a\") "
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.083171 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5lpp\" (UniqueName: \"kubernetes.io/projected/17db9048-13f8-4d58-9098-c6f8ea322009-kube-api-access-v5lpp\") pod \"nova-scheduler-0\" (UID: \"17db9048-13f8-4d58-9098-c6f8ea322009\") " pod="openstack/nova-scheduler-0"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.083201 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17db9048-13f8-4d58-9098-c6f8ea322009-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"17db9048-13f8-4d58-9098-c6f8ea322009\") " pod="openstack/nova-scheduler-0"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.083226 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17db9048-13f8-4d58-9098-c6f8ea322009-config-data\") pod \"nova-scheduler-0\" (UID: \"17db9048-13f8-4d58-9098-c6f8ea322009\") " pod="openstack/nova-scheduler-0"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.083266 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6590a18a-25c1-4edd-beef-82369b00055a-logs" (OuterVolumeSpecName: "logs") pod "6590a18a-25c1-4edd-beef-82369b00055a" (UID: "6590a18a-25c1-4edd-beef-82369b00055a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.106436 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6590a18a-25c1-4edd-beef-82369b00055a-kube-api-access-46hjb" (OuterVolumeSpecName: "kube-api-access-46hjb") pod "6590a18a-25c1-4edd-beef-82369b00055a" (UID: "6590a18a-25c1-4edd-beef-82369b00055a"). InnerVolumeSpecName "kube-api-access-46hjb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.126761 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6590a18a-25c1-4edd-beef-82369b00055a-config-data" (OuterVolumeSpecName: "config-data") pod "6590a18a-25c1-4edd-beef-82369b00055a" (UID: "6590a18a-25c1-4edd-beef-82369b00055a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.131203 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6590a18a-25c1-4edd-beef-82369b00055a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6590a18a-25c1-4edd-beef-82369b00055a" (UID: "6590a18a-25c1-4edd-beef-82369b00055a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.185488 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5lpp\" (UniqueName: \"kubernetes.io/projected/17db9048-13f8-4d58-9098-c6f8ea322009-kube-api-access-v5lpp\") pod \"nova-scheduler-0\" (UID: \"17db9048-13f8-4d58-9098-c6f8ea322009\") " pod="openstack/nova-scheduler-0"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.185810 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17db9048-13f8-4d58-9098-c6f8ea322009-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"17db9048-13f8-4d58-9098-c6f8ea322009\") " pod="openstack/nova-scheduler-0"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.185861 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17db9048-13f8-4d58-9098-c6f8ea322009-config-data\") pod \"nova-scheduler-0\" (UID: \"17db9048-13f8-4d58-9098-c6f8ea322009\") " pod="openstack/nova-scheduler-0"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.185941 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6590a18a-25c1-4edd-beef-82369b00055a-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.185952 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6590a18a-25c1-4edd-beef-82369b00055a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.185964 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46hjb\" (UniqueName: \"kubernetes.io/projected/6590a18a-25c1-4edd-beef-82369b00055a-kube-api-access-46hjb\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.185974 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6590a18a-25c1-4edd-beef-82369b00055a-logs\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.195207 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17db9048-13f8-4d58-9098-c6f8ea322009-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"17db9048-13f8-4d58-9098-c6f8ea322009\") " pod="openstack/nova-scheduler-0"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.197817 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17db9048-13f8-4d58-9098-c6f8ea322009-config-data\") pod \"nova-scheduler-0\" (UID: \"17db9048-13f8-4d58-9098-c6f8ea322009\") " pod="openstack/nova-scheduler-0"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.205835 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5lpp\" (UniqueName: \"kubernetes.io/projected/17db9048-13f8-4d58-9098-c6f8ea322009-kube-api-access-v5lpp\") pod \"nova-scheduler-0\" (UID: \"17db9048-13f8-4d58-9098-c6f8ea322009\") " pod="openstack/nova-scheduler-0"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.284908 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.797822 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 11 04:14:18 crc kubenswrapper[4798]: W1011 04:14:18.803045 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod17db9048_13f8_4d58_9098_c6f8ea322009.slice/crio-a2556efb1e9a41547c5054d5a7fd50d2428e7d049e15d97787275540218c1377 WatchSource:0}: Error finding container a2556efb1e9a41547c5054d5a7fd50d2428e7d049e15d97787275540218c1377: Status 404 returned error can't find the container with id a2556efb1e9a41547c5054d5a7fd50d2428e7d049e15d97787275540218c1377
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.917664 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"679fd723-9ff7-4d15-b769-3d709ed1f9ab","Type":"ContainerStarted","Data":"1b10a01ae2b31c73f5fd84fdb2eaf23fc80e7eb2392c49efe6922e51d1f1d332"}
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.920090 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.920089 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6590a18a-25c1-4edd-beef-82369b00055a","Type":"ContainerDied","Data":"d92fa79e052ae646acfb143ef9f0b7200c085fefe445bfe7e10ff324b5583c71"}
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.920344 4798 scope.go:117] "RemoveContainer" containerID="6cac8e54c7dc1456964bfd2358fb690a0e053512c28cef9a12317cb68cc3d4c7"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.922121 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"17db9048-13f8-4d58-9098-c6f8ea322009","Type":"ContainerStarted","Data":"a2556efb1e9a41547c5054d5a7fd50d2428e7d049e15d97787275540218c1377"}
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.941638 4798 scope.go:117] "RemoveContainer" containerID="b43be117c77833821d2dc38230f429404289bfa0b30f83b1ed13441b031d8b23"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.958961 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.968784 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.986464 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.988279 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.990297 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.993407 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Oct 11 04:14:18 crc kubenswrapper[4798]: I1011 04:14:18.993689 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.002322 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.104891 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-config-data\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.104995 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.105048 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2hml\" (UniqueName: \"kubernetes.io/projected/3a88838e-6c61-40ca-8bec-fce2f8da0406-kube-api-access-m2hml\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.105072 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.105122 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-public-tls-certs\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.105144 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a88838e-6c61-40ca-8bec-fce2f8da0406-logs\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.207826 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-config-data\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.207931 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.208061 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2hml\" (UniqueName: \"kubernetes.io/projected/3a88838e-6c61-40ca-8bec-fce2f8da0406-kube-api-access-m2hml\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.208125 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.208178 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-public-tls-certs\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.208206 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a88838e-6c61-40ca-8bec-fce2f8da0406-logs\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.208999 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a88838e-6c61-40ca-8bec-fce2f8da0406-logs\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.217540 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.217550 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-public-tls-certs\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.217766 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.218559 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-config-data\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.238784 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2hml\" (UniqueName: \"kubernetes.io/projected/3a88838e-6c61-40ca-8bec-fce2f8da0406-kube-api-access-m2hml\") pod \"nova-api-0\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") " pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.310811 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.450952 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6590a18a-25c1-4edd-beef-82369b00055a" path="/var/lib/kubelet/pods/6590a18a-25c1-4edd-beef-82369b00055a/volumes"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.451938 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4ba238e-3ec7-4703-862c-a1f14abda9c9" path="/var/lib/kubelet/pods/c4ba238e-3ec7-4703-862c-a1f14abda9c9/volumes"
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.807894 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.938868 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"17db9048-13f8-4d58-9098-c6f8ea322009","Type":"ContainerStarted","Data":"895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a"}
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.942944 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"679fd723-9ff7-4d15-b769-3d709ed1f9ab","Type":"ContainerStarted","Data":"d90b580096c09653e41e84e5f964e8af6a915f538f16413ff9076927b16fc167"}
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.947910 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3a88838e-6c61-40ca-8bec-fce2f8da0406","Type":"ContainerStarted","Data":"5b2fdc5eef3c991e7dee8763c7c60bc53385c8882a414cf0fb3759733dae2762"}
Oct 11 04:14:19 crc kubenswrapper[4798]: I1011 04:14:19.963427 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.963381514 podStartE2EDuration="2.963381514s" podCreationTimestamp="2025-10-11 04:14:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:14:19.956428489 +0000 UTC m=+1155.292718175" watchObservedRunningTime="2025-10-11 04:14:19.963381514 +0000 UTC m=+1155.299671200"
Oct 11 04:14:20 crc kubenswrapper[4798]: I1011 04:14:20.433239 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:14:20 crc kubenswrapper[4798]: I1011 04:14:20.457122 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:14:20 crc kubenswrapper[4798]: I1011 04:14:20.962369 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"679fd723-9ff7-4d15-b769-3d709ed1f9ab","Type":"ContainerStarted","Data":"6a3e25816582fceeef3240445751d55263c4943af22edc52f7d757cb05fad6bb"}
Oct 11 04:14:20 crc kubenswrapper[4798]: I1011 04:14:20.965246 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3a88838e-6c61-40ca-8bec-fce2f8da0406","Type":"ContainerStarted","Data":"bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c"}
Oct 11 04:14:20 crc kubenswrapper[4798]: I1011 04:14:20.965281 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3a88838e-6c61-40ca-8bec-fce2f8da0406","Type":"ContainerStarted","Data":"96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0"}
Oct 11 04:14:20 crc kubenswrapper[4798]: I1011 04:14:20.985018 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.984998098 podStartE2EDuration="2.984998098s" podCreationTimestamp="2025-10-11 04:14:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:14:20.984024695 +0000 UTC m=+1156.320314381" watchObservedRunningTime="2025-10-11 04:14:20.984998098 +0000 UTC m=+1156.321287784"
Oct 11 04:14:20 crc kubenswrapper[4798]: I1011 04:14:20.992661 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.142708 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-rnnjx"]
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.144183 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.148042 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.148814 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.153238 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-rnnjx"]
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.266018 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rntfb\" (UniqueName: \"kubernetes.io/projected/37feee20-a933-440b-8b62-59566bb9f440-kube-api-access-rntfb\") pod \"nova-cell1-cell-mapping-rnnjx\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") " pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.266491 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-scripts\") pod \"nova-cell1-cell-mapping-rnnjx\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") " pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.266651 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-config-data\") pod \"nova-cell1-cell-mapping-rnnjx\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") " pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.266963 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-rnnjx\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") " pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.368510 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-config-data\") pod \"nova-cell1-cell-mapping-rnnjx\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") " pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.368609 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-rnnjx\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") " pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.368673 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rntfb\" (UniqueName: \"kubernetes.io/projected/37feee20-a933-440b-8b62-59566bb9f440-kube-api-access-rntfb\") pod \"nova-cell1-cell-mapping-rnnjx\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") " pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.368742 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-scripts\") pod \"nova-cell1-cell-mapping-rnnjx\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") " pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.383803 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-scripts\") pod \"nova-cell1-cell-mapping-rnnjx\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") " pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.386202 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-config-data\") pod \"nova-cell1-cell-mapping-rnnjx\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") " pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.389018 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-rnnjx\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") " pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.392529 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rntfb\" (UniqueName: \"kubernetes.io/projected/37feee20-a933-440b-8b62-59566bb9f440-kube-api-access-rntfb\") pod \"nova-cell1-cell-mapping-rnnjx\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") " pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.462386 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.595608 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd"
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.666977 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-zdg5l"]
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.667627 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l" podUID="2b43dc17-45f9-4c69-84cb-5590a4098add" containerName="dnsmasq-dns" containerID="cri-o://1b3374b1b370c07ed3cfc864fe3a8059fc5425c371e82d16ec7722e5cd1043fa" gracePeriod=10
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.975951 4798 generic.go:334] "Generic (PLEG): container finished" podID="2b43dc17-45f9-4c69-84cb-5590a4098add" containerID="1b3374b1b370c07ed3cfc864fe3a8059fc5425c371e82d16ec7722e5cd1043fa" exitCode=0
Oct 11 04:14:21 crc kubenswrapper[4798]: I1011 04:14:21.978119 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l" event={"ID":"2b43dc17-45f9-4c69-84cb-5590a4098add","Type":"ContainerDied","Data":"1b3374b1b370c07ed3cfc864fe3a8059fc5425c371e82d16ec7722e5cd1043fa"}
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.018744 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-rnnjx"]
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.104716 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.288904 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-config\") pod \"2b43dc17-45f9-4c69-84cb-5590a4098add\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") "
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.288945 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-dns-svc\") pod \"2b43dc17-45f9-4c69-84cb-5590a4098add\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") "
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.288972 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvh4l\" (UniqueName: \"kubernetes.io/projected/2b43dc17-45f9-4c69-84cb-5590a4098add-kube-api-access-dvh4l\") pod \"2b43dc17-45f9-4c69-84cb-5590a4098add\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") "
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.289093 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-ovsdbserver-sb\") pod \"2b43dc17-45f9-4c69-84cb-5590a4098add\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") "
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.289149 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-ovsdbserver-nb\") pod \"2b43dc17-45f9-4c69-84cb-5590a4098add\" (UID: \"2b43dc17-45f9-4c69-84cb-5590a4098add\") "
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.293672 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b43dc17-45f9-4c69-84cb-5590a4098add-kube-api-access-dvh4l" (OuterVolumeSpecName: "kube-api-access-dvh4l") pod "2b43dc17-45f9-4c69-84cb-5590a4098add" (UID: "2b43dc17-45f9-4c69-84cb-5590a4098add"). InnerVolumeSpecName "kube-api-access-dvh4l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.333987 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2b43dc17-45f9-4c69-84cb-5590a4098add" (UID: "2b43dc17-45f9-4c69-84cb-5590a4098add"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.336819 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-config" (OuterVolumeSpecName: "config") pod "2b43dc17-45f9-4c69-84cb-5590a4098add" (UID: "2b43dc17-45f9-4c69-84cb-5590a4098add"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.340530 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2b43dc17-45f9-4c69-84cb-5590a4098add" (UID: "2b43dc17-45f9-4c69-84cb-5590a4098add"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.346484 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2b43dc17-45f9-4c69-84cb-5590a4098add" (UID: "2b43dc17-45f9-4c69-84cb-5590a4098add"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.390777 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.390819 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.390831 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-config\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.390840 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b43dc17-45f9-4c69-84cb-5590a4098add-dns-svc\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.390850 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvh4l\" (UniqueName: \"kubernetes.io/projected/2b43dc17-45f9-4c69-84cb-5590a4098add-kube-api-access-dvh4l\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.997478 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-rnnjx" event={"ID":"37feee20-a933-440b-8b62-59566bb9f440","Type":"ContainerStarted","Data":"b8f9b12cc44036cadbd2c018e907b8a1f07c337830cbf32f0e0c4d991c918a41"}
Oct 11 04:14:22 crc kubenswrapper[4798]: I1011 04:14:22.997914 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-rnnjx" event={"ID":"37feee20-a933-440b-8b62-59566bb9f440","Type":"ContainerStarted","Data":"243d045cba8825366e1a17e0510994d93f24dd60b420a8aa45589770497b7844"}
Oct 11 04:14:23 crc kubenswrapper[4798]: I1011 04:14:23.005700 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l"
Oct 11 04:14:23 crc kubenswrapper[4798]: I1011 04:14:23.006299 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-zdg5l" event={"ID":"2b43dc17-45f9-4c69-84cb-5590a4098add","Type":"ContainerDied","Data":"177be28dfade2e70f43f82cb5272f7bc1a5c98bf543ea5205b1de90ac0031be2"}
Oct 11 04:14:23 crc kubenswrapper[4798]: I1011 04:14:23.006359 4798 scope.go:117] "RemoveContainer" containerID="1b3374b1b370c07ed3cfc864fe3a8059fc5425c371e82d16ec7722e5cd1043fa"
Oct 11 04:14:23 crc kubenswrapper[4798]: I1011 04:14:23.029856 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"679fd723-9ff7-4d15-b769-3d709ed1f9ab","Type":"ContainerStarted","Data":"79a4ab8d75501aa2ffa0b9b593d98a34639de7eb61c673ce43a444369f894791"}
Oct 11 04:14:23 crc kubenswrapper[4798]: I1011 04:14:23.031622 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Oct 11 04:14:23 crc kubenswrapper[4798]: I1011 04:14:23.035276 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-rnnjx" podStartSLOduration=2.035243134 podStartE2EDuration="2.035243134s" podCreationTimestamp="2025-10-11 04:14:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:14:23.014498911 +0000 UTC m=+1158.350788607" watchObservedRunningTime="2025-10-11 04:14:23.035243134 +0000 UTC m=+1158.371532830"
Oct 11 04:14:23 crc kubenswrapper[4798]: I1011 04:14:23.043578 4798 scope.go:117] "RemoveContainer" containerID="13a26e002a8fced191e8e9f1273dc82d079919ede9b65cd71ca238c5bacd40e8"
Oct 11 04:14:23 crc kubenswrapper[4798]: I1011 04:14:23.072514 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.863400764 podStartE2EDuration="7.072497572s" podCreationTimestamp="2025-10-11 04:14:16 +0000 UTC" firstStartedPulling="2025-10-11 04:14:17.82838285 +0000 UTC m=+1153.164672536" lastFinishedPulling="2025-10-11 04:14:22.037479658 +0000 UTC m=+1157.373769344" observedRunningTime="2025-10-11 04:14:23.071624201 +0000 UTC m=+1158.407913887" watchObservedRunningTime="2025-10-11 04:14:23.072497572 +0000 UTC m=+1158.408787258"
Oct 11 04:14:23 crc kubenswrapper[4798]: I1011 04:14:23.100532 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-zdg5l"]
Oct 11 04:14:23 crc kubenswrapper[4798]: I1011 04:14:23.108655 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-zdg5l"]
Oct 11 04:14:23 crc kubenswrapper[4798]: I1011 04:14:23.285302 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Oct 11 04:14:23 crc kubenswrapper[4798]: I1011 04:14:23.433035 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b43dc17-45f9-4c69-84cb-5590a4098add" path="/var/lib/kubelet/pods/2b43dc17-45f9-4c69-84cb-5590a4098add/volumes"
Oct 11 04:14:27 crc kubenswrapper[4798]: I1011 04:14:27.088712 4798 generic.go:334] "Generic (PLEG): container finished" podID="37feee20-a933-440b-8b62-59566bb9f440" containerID="b8f9b12cc44036cadbd2c018e907b8a1f07c337830cbf32f0e0c4d991c918a41" exitCode=0
Oct 11 04:14:27 crc kubenswrapper[4798]: I1011 04:14:27.088825 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-rnnjx" event={"ID":"37feee20-a933-440b-8b62-59566bb9f440","Type":"ContainerDied","Data":"b8f9b12cc44036cadbd2c018e907b8a1f07c337830cbf32f0e0c4d991c918a41"}
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.285836 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.314642 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.421074 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.605169 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-combined-ca-bundle\") pod \"37feee20-a933-440b-8b62-59566bb9f440\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") "
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.605242 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-scripts\") pod \"37feee20-a933-440b-8b62-59566bb9f440\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") "
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.605294 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-config-data\") pod \"37feee20-a933-440b-8b62-59566bb9f440\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") "
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.605331 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rntfb\" (UniqueName: \"kubernetes.io/projected/37feee20-a933-440b-8b62-59566bb9f440-kube-api-access-rntfb\") pod \"37feee20-a933-440b-8b62-59566bb9f440\" (UID: \"37feee20-a933-440b-8b62-59566bb9f440\") "
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.611296 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37feee20-a933-440b-8b62-59566bb9f440-kube-api-access-rntfb" (OuterVolumeSpecName: "kube-api-access-rntfb") pod "37feee20-a933-440b-8b62-59566bb9f440" (UID: "37feee20-a933-440b-8b62-59566bb9f440"). InnerVolumeSpecName "kube-api-access-rntfb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.612386 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-scripts" (OuterVolumeSpecName: "scripts") pod "37feee20-a933-440b-8b62-59566bb9f440" (UID: "37feee20-a933-440b-8b62-59566bb9f440"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.632846 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-config-data" (OuterVolumeSpecName: "config-data") pod "37feee20-a933-440b-8b62-59566bb9f440" (UID: "37feee20-a933-440b-8b62-59566bb9f440"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.634387 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "37feee20-a933-440b-8b62-59566bb9f440" (UID: "37feee20-a933-440b-8b62-59566bb9f440"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.707262 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.707304 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.707314 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37feee20-a933-440b-8b62-59566bb9f440-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:28 crc kubenswrapper[4798]: I1011 04:14:28.707324 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rntfb\" (UniqueName: \"kubernetes.io/projected/37feee20-a933-440b-8b62-59566bb9f440-kube-api-access-rntfb\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:29 crc kubenswrapper[4798]: I1011 04:14:29.115451 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-rnnjx" event={"ID":"37feee20-a933-440b-8b62-59566bb9f440","Type":"ContainerDied","Data":"243d045cba8825366e1a17e0510994d93f24dd60b420a8aa45589770497b7844"}
Oct 11 04:14:29 crc kubenswrapper[4798]: I1011 04:14:29.115511 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="243d045cba8825366e1a17e0510994d93f24dd60b420a8aa45589770497b7844"
Oct 11 04:14:29 crc kubenswrapper[4798]: I1011 04:14:29.115475 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-rnnjx"
Oct 11 04:14:29 crc kubenswrapper[4798]: I1011 04:14:29.166353 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Oct 11 04:14:29 crc kubenswrapper[4798]: I1011 04:14:29.303929 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Oct 11 04:14:29 crc kubenswrapper[4798]: I1011 04:14:29.304219 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3a88838e-6c61-40ca-8bec-fce2f8da0406" containerName="nova-api-log" containerID="cri-o://96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0" gracePeriod=30
Oct 11 04:14:29 crc kubenswrapper[4798]: I1011 04:14:29.304324 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3a88838e-6c61-40ca-8bec-fce2f8da0406" containerName="nova-api-api" containerID="cri-o://bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c" gracePeriod=30
Oct 11 04:14:29 crc kubenswrapper[4798]: I1011 04:14:29.354171 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Oct 11 04:14:29 crc kubenswrapper[4798]: I1011 04:14:29.354434 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8048fd5b-4d35-45d2-8120-233af208d713" containerName="nova-metadata-log" containerID="cri-o://b501c2408606b0754f13093b47a67a9b4c25676c5b419c257b70fc90a77a3384" gracePeriod=30
Oct 11 04:14:29 crc kubenswrapper[4798]: I1011 04:14:29.354524 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8048fd5b-4d35-45d2-8120-233af208d713" containerName="nova-metadata-metadata" containerID="cri-o://5d8e5d353fdf27cbc770ea9ddefabf016d056ecd8cc3bf5c6cdda2be41de405d" gracePeriod=30
Oct 11 04:14:29 crc kubenswrapper[4798]: I1011 04:14:29.641571 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 11 04:14:29 crc kubenswrapper[4798]: I1011 04:14:29.879323 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.034839 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-combined-ca-bundle\") pod \"3a88838e-6c61-40ca-8bec-fce2f8da0406\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") "
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.034924 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a88838e-6c61-40ca-8bec-fce2f8da0406-logs\") pod \"3a88838e-6c61-40ca-8bec-fce2f8da0406\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") "
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.034947 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-internal-tls-certs\") pod \"3a88838e-6c61-40ca-8bec-fce2f8da0406\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") "
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.035056 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2hml\" (UniqueName: \"kubernetes.io/projected/3a88838e-6c61-40ca-8bec-fce2f8da0406-kube-api-access-m2hml\") pod \"3a88838e-6c61-40ca-8bec-fce2f8da0406\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") "
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.035098 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-config-data\") pod \"3a88838e-6c61-40ca-8bec-fce2f8da0406\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") "
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.035127 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-public-tls-certs\") pod \"3a88838e-6c61-40ca-8bec-fce2f8da0406\" (UID: \"3a88838e-6c61-40ca-8bec-fce2f8da0406\") "
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.035743 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a88838e-6c61-40ca-8bec-fce2f8da0406-logs" (OuterVolumeSpecName: "logs") pod "3a88838e-6c61-40ca-8bec-fce2f8da0406" (UID: "3a88838e-6c61-40ca-8bec-fce2f8da0406"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.042261 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a88838e-6c61-40ca-8bec-fce2f8da0406-kube-api-access-m2hml" (OuterVolumeSpecName: "kube-api-access-m2hml") pod "3a88838e-6c61-40ca-8bec-fce2f8da0406" (UID: "3a88838e-6c61-40ca-8bec-fce2f8da0406"). InnerVolumeSpecName "kube-api-access-m2hml". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.066256 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-config-data" (OuterVolumeSpecName: "config-data") pod "3a88838e-6c61-40ca-8bec-fce2f8da0406" (UID: "3a88838e-6c61-40ca-8bec-fce2f8da0406"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.066890 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3a88838e-6c61-40ca-8bec-fce2f8da0406" (UID: "3a88838e-6c61-40ca-8bec-fce2f8da0406"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.084148 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3a88838e-6c61-40ca-8bec-fce2f8da0406" (UID: "3a88838e-6c61-40ca-8bec-fce2f8da0406"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.085905 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3a88838e-6c61-40ca-8bec-fce2f8da0406" (UID: "3a88838e-6c61-40ca-8bec-fce2f8da0406"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.130171 4798 generic.go:334] "Generic (PLEG): container finished" podID="3a88838e-6c61-40ca-8bec-fce2f8da0406" containerID="bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c" exitCode=0
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.130219 4798 generic.go:334] "Generic (PLEG): container finished" podID="3a88838e-6c61-40ca-8bec-fce2f8da0406" containerID="96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0" exitCode=143
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.130312 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.130344 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3a88838e-6c61-40ca-8bec-fce2f8da0406","Type":"ContainerDied","Data":"bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c"}
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.130385 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3a88838e-6c61-40ca-8bec-fce2f8da0406","Type":"ContainerDied","Data":"96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0"}
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.130430 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3a88838e-6c61-40ca-8bec-fce2f8da0406","Type":"ContainerDied","Data":"5b2fdc5eef3c991e7dee8763c7c60bc53385c8882a414cf0fb3759733dae2762"}
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.130456 4798 scope.go:117] "RemoveContainer" containerID="bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c"
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.137168 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2hml\" (UniqueName: \"kubernetes.io/projected/3a88838e-6c61-40ca-8bec-fce2f8da0406-kube-api-access-m2hml\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.137210 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.137224 4798 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-public-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.137237 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.137248 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a88838e-6c61-40ca-8bec-fce2f8da0406-logs\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.137260 4798 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a88838e-6c61-40ca-8bec-fce2f8da0406-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.142632 4798 generic.go:334] "Generic (PLEG): container finished" podID="8048fd5b-4d35-45d2-8120-233af208d713" containerID="b501c2408606b0754f13093b47a67a9b4c25676c5b419c257b70fc90a77a3384" exitCode=143
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.142709 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8048fd5b-4d35-45d2-8120-233af208d713","Type":"ContainerDied","Data":"b501c2408606b0754f13093b47a67a9b4c25676c5b419c257b70fc90a77a3384"}
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.167026 4798 scope.go:117] "RemoveContainer" containerID="96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0"
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.179607 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.196206 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.207199 4798 scope.go:117] "RemoveContainer" containerID="bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c"
Oct 11 04:14:30 crc kubenswrapper[4798]: E1011 04:14:30.207759 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c\": container with ID starting with bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c not found: ID does not exist" containerID="bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c"
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.207800 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c"} err="failed to get container status \"bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c\": rpc error: code = NotFound desc = could not find container \"bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c\": container with ID starting with bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c not found: ID does not exist"
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.207831 4798 scope.go:117] "RemoveContainer" containerID="96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0"
Oct 11 04:14:30 crc kubenswrapper[4798]: E1011 04:14:30.208093 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0\": container with ID starting with 96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0 not found: ID does not exist" containerID="96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0"
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.208132 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0"} err="failed to get container status \"96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0\": rpc error: code = NotFound desc = could not find container \"96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0\": container with ID starting with 96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0 not found: ID does not exist"
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.208150 4798 scope.go:117] "RemoveContainer" containerID="bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c"
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.208110 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.208431 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c"} err="failed to get container status \"bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c\": rpc error: code = NotFound desc = could not find container \"bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c\": container with ID starting with 
bbf819f9e8154e7e56661e8b9811d35d520c0f63dff8bb32937b7484e711375c not found: ID does not exist" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.208463 4798 scope.go:117] "RemoveContainer" containerID="96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.208707 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0"} err="failed to get container status \"96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0\": rpc error: code = NotFound desc = could not find container \"96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0\": container with ID starting with 96b34afbd1450a1fd839e9f274623a0f3216ae73405db1d752c7568487ac36d0 not found: ID does not exist" Oct 11 04:14:30 crc kubenswrapper[4798]: E1011 04:14:30.208877 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b43dc17-45f9-4c69-84cb-5590a4098add" containerName="init" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.208909 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b43dc17-45f9-4c69-84cb-5590a4098add" containerName="init" Oct 11 04:14:30 crc kubenswrapper[4798]: E1011 04:14:30.208920 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a88838e-6c61-40ca-8bec-fce2f8da0406" containerName="nova-api-log" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.208928 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a88838e-6c61-40ca-8bec-fce2f8da0406" containerName="nova-api-log" Oct 11 04:14:30 crc kubenswrapper[4798]: E1011 04:14:30.208946 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a88838e-6c61-40ca-8bec-fce2f8da0406" containerName="nova-api-api" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.208971 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a88838e-6c61-40ca-8bec-fce2f8da0406" containerName="nova-api-api" Oct 11 04:14:30 crc kubenswrapper[4798]: E1011 04:14:30.208984 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37feee20-a933-440b-8b62-59566bb9f440" containerName="nova-manage" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.208992 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="37feee20-a933-440b-8b62-59566bb9f440" containerName="nova-manage" Oct 11 04:14:30 crc kubenswrapper[4798]: E1011 04:14:30.209018 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b43dc17-45f9-4c69-84cb-5590a4098add" containerName="dnsmasq-dns" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.209027 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b43dc17-45f9-4c69-84cb-5590a4098add" containerName="dnsmasq-dns" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.209256 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="37feee20-a933-440b-8b62-59566bb9f440" containerName="nova-manage" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.209276 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b43dc17-45f9-4c69-84cb-5590a4098add" containerName="dnsmasq-dns" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.209300 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a88838e-6c61-40ca-8bec-fce2f8da0406" containerName="nova-api-api" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.209318 4798 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="3a88838e-6c61-40ca-8bec-fce2f8da0406" containerName="nova-api-log" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.210640 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.216635 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.216888 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.217147 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.217535 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.339906 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3498eff7-9fc6-42de-ab90-0df1ab533041-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.341366 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3498eff7-9fc6-42de-ab90-0df1ab533041-config-data\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.341610 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3498eff7-9fc6-42de-ab90-0df1ab533041-public-tls-certs\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.341741 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3498eff7-9fc6-42de-ab90-0df1ab533041-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.341896 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjkkq\" (UniqueName: \"kubernetes.io/projected/3498eff7-9fc6-42de-ab90-0df1ab533041-kube-api-access-kjkkq\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.342011 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3498eff7-9fc6-42de-ab90-0df1ab533041-logs\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.443752 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3498eff7-9fc6-42de-ab90-0df1ab533041-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.443850 4798 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3498eff7-9fc6-42de-ab90-0df1ab533041-config-data\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.443934 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3498eff7-9fc6-42de-ab90-0df1ab533041-public-tls-certs\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.443965 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3498eff7-9fc6-42de-ab90-0df1ab533041-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.444021 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjkkq\" (UniqueName: \"kubernetes.io/projected/3498eff7-9fc6-42de-ab90-0df1ab533041-kube-api-access-kjkkq\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.444051 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3498eff7-9fc6-42de-ab90-0df1ab533041-logs\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.444675 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3498eff7-9fc6-42de-ab90-0df1ab533041-logs\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.449149 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3498eff7-9fc6-42de-ab90-0df1ab533041-public-tls-certs\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.449990 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3498eff7-9fc6-42de-ab90-0df1ab533041-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.450665 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3498eff7-9fc6-42de-ab90-0df1ab533041-config-data\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.454000 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3498eff7-9fc6-42de-ab90-0df1ab533041-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.465114 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-kjkkq\" (UniqueName: \"kubernetes.io/projected/3498eff7-9fc6-42de-ab90-0df1ab533041-kube-api-access-kjkkq\") pod \"nova-api-0\" (UID: \"3498eff7-9fc6-42de-ab90-0df1ab533041\") " pod="openstack/nova-api-0" Oct 11 04:14:30 crc kubenswrapper[4798]: I1011 04:14:30.549683 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 11 04:14:31 crc kubenswrapper[4798]: I1011 04:14:31.020313 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 11 04:14:31 crc kubenswrapper[4798]: I1011 04:14:31.156610 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3498eff7-9fc6-42de-ab90-0df1ab533041","Type":"ContainerStarted","Data":"0c52753777d173e27289bdfb9b8db58c0a2334abb8d0757b8872000e2f8d6a85"} Oct 11 04:14:31 crc kubenswrapper[4798]: I1011 04:14:31.156787 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="17db9048-13f8-4d58-9098-c6f8ea322009" containerName="nova-scheduler-scheduler" containerID="cri-o://895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a" gracePeriod=30 Oct 11 04:14:31 crc kubenswrapper[4798]: I1011 04:14:31.434143 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a88838e-6c61-40ca-8bec-fce2f8da0406" path="/var/lib/kubelet/pods/3a88838e-6c61-40ca-8bec-fce2f8da0406/volumes" Oct 11 04:14:32 crc kubenswrapper[4798]: I1011 04:14:32.168569 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3498eff7-9fc6-42de-ab90-0df1ab533041","Type":"ContainerStarted","Data":"8be2079888460d9c1837c605edcea0f5be5aa1a7401665eaa7d7efecdcb6589d"} Oct 11 04:14:32 crc kubenswrapper[4798]: I1011 04:14:32.169355 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3498eff7-9fc6-42de-ab90-0df1ab533041","Type":"ContainerStarted","Data":"7c3b010b7f44f809320fc63d8850e0ab266c25e152cde635396d2a8f6ce5c847"} Oct 11 04:14:32 crc kubenswrapper[4798]: I1011 04:14:32.196194 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.196168622 podStartE2EDuration="2.196168622s" podCreationTimestamp="2025-10-11 04:14:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:14:32.187452515 +0000 UTC m=+1167.523742211" watchObservedRunningTime="2025-10-11 04:14:32.196168622 +0000 UTC m=+1167.532458318" Oct 11 04:14:32 crc kubenswrapper[4798]: I1011 04:14:32.493746 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="8048fd5b-4d35-45d2-8120-233af208d713" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.178:8775/\": read tcp 10.217.0.2:48624->10.217.0.178:8775: read: connection reset by peer" Oct 11 04:14:32 crc kubenswrapper[4798]: I1011 04:14:32.493806 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="8048fd5b-4d35-45d2-8120-233af208d713" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.178:8775/\": read tcp 10.217.0.2:48630->10.217.0.178:8775: read: connection reset by peer" Oct 11 04:14:32 crc kubenswrapper[4798]: I1011 04:14:32.961274 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.016430 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-config-data\") pod \"8048fd5b-4d35-45d2-8120-233af208d713\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.016551 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lc6px\" (UniqueName: \"kubernetes.io/projected/8048fd5b-4d35-45d2-8120-233af208d713-kube-api-access-lc6px\") pod \"8048fd5b-4d35-45d2-8120-233af208d713\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.016675 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8048fd5b-4d35-45d2-8120-233af208d713-logs\") pod \"8048fd5b-4d35-45d2-8120-233af208d713\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.016703 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-combined-ca-bundle\") pod \"8048fd5b-4d35-45d2-8120-233af208d713\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.016840 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-nova-metadata-tls-certs\") pod \"8048fd5b-4d35-45d2-8120-233af208d713\" (UID: \"8048fd5b-4d35-45d2-8120-233af208d713\") " Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.017244 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8048fd5b-4d35-45d2-8120-233af208d713-logs" (OuterVolumeSpecName: "logs") pod "8048fd5b-4d35-45d2-8120-233af208d713" (UID: "8048fd5b-4d35-45d2-8120-233af208d713"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.017889 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8048fd5b-4d35-45d2-8120-233af208d713-logs\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.044702 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8048fd5b-4d35-45d2-8120-233af208d713-kube-api-access-lc6px" (OuterVolumeSpecName: "kube-api-access-lc6px") pod "8048fd5b-4d35-45d2-8120-233af208d713" (UID: "8048fd5b-4d35-45d2-8120-233af208d713"). InnerVolumeSpecName "kube-api-access-lc6px". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.055204 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-config-data" (OuterVolumeSpecName: "config-data") pod "8048fd5b-4d35-45d2-8120-233af208d713" (UID: "8048fd5b-4d35-45d2-8120-233af208d713"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.055281 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8048fd5b-4d35-45d2-8120-233af208d713" (UID: "8048fd5b-4d35-45d2-8120-233af208d713"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.083848 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "8048fd5b-4d35-45d2-8120-233af208d713" (UID: "8048fd5b-4d35-45d2-8120-233af208d713"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.119630 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.119895 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lc6px\" (UniqueName: \"kubernetes.io/projected/8048fd5b-4d35-45d2-8120-233af208d713-kube-api-access-lc6px\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.119995 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.120052 4798 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8048fd5b-4d35-45d2-8120-233af208d713-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.190524 4798 generic.go:334] "Generic (PLEG): container finished" podID="8048fd5b-4d35-45d2-8120-233af208d713" containerID="5d8e5d353fdf27cbc770ea9ddefabf016d056ecd8cc3bf5c6cdda2be41de405d" exitCode=0 Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.191866 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8048fd5b-4d35-45d2-8120-233af208d713","Type":"ContainerDied","Data":"5d8e5d353fdf27cbc770ea9ddefabf016d056ecd8cc3bf5c6cdda2be41de405d"} Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.191946 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8048fd5b-4d35-45d2-8120-233af208d713","Type":"ContainerDied","Data":"54d0d749caffc9b8411eb3f33f7f2eabee7e8ec713b7625090631a37253a44ab"} Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.191967 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.191970 4798 scope.go:117] "RemoveContainer" containerID="5d8e5d353fdf27cbc770ea9ddefabf016d056ecd8cc3bf5c6cdda2be41de405d" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.214711 4798 scope.go:117] "RemoveContainer" containerID="b501c2408606b0754f13093b47a67a9b4c25676c5b419c257b70fc90a77a3384" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.230422 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.241407 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.250742 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:14:33 crc kubenswrapper[4798]: E1011 04:14:33.251586 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8048fd5b-4d35-45d2-8120-233af208d713" containerName="nova-metadata-metadata" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.251658 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="8048fd5b-4d35-45d2-8120-233af208d713" containerName="nova-metadata-metadata" Oct 11 04:14:33 crc kubenswrapper[4798]: E1011 04:14:33.251775 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8048fd5b-4d35-45d2-8120-233af208d713" containerName="nova-metadata-log" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.251870 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="8048fd5b-4d35-45d2-8120-233af208d713" containerName="nova-metadata-log" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.251707 4798 scope.go:117] "RemoveContainer" containerID="5d8e5d353fdf27cbc770ea9ddefabf016d056ecd8cc3bf5c6cdda2be41de405d" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.252219 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="8048fd5b-4d35-45d2-8120-233af208d713" containerName="nova-metadata-log" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.252298 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="8048fd5b-4d35-45d2-8120-233af208d713" containerName="nova-metadata-metadata" Oct 11 04:14:33 crc kubenswrapper[4798]: E1011 04:14:33.252458 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d8e5d353fdf27cbc770ea9ddefabf016d056ecd8cc3bf5c6cdda2be41de405d\": container with ID starting with 5d8e5d353fdf27cbc770ea9ddefabf016d056ecd8cc3bf5c6cdda2be41de405d not found: ID does not exist" containerID="5d8e5d353fdf27cbc770ea9ddefabf016d056ecd8cc3bf5c6cdda2be41de405d" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.252502 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d8e5d353fdf27cbc770ea9ddefabf016d056ecd8cc3bf5c6cdda2be41de405d"} err="failed to get container status \"5d8e5d353fdf27cbc770ea9ddefabf016d056ecd8cc3bf5c6cdda2be41de405d\": rpc error: code = NotFound desc = could not find container \"5d8e5d353fdf27cbc770ea9ddefabf016d056ecd8cc3bf5c6cdda2be41de405d\": container with ID starting with 5d8e5d353fdf27cbc770ea9ddefabf016d056ecd8cc3bf5c6cdda2be41de405d not found: ID does not exist" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.252529 4798 scope.go:117] "RemoveContainer" containerID="b501c2408606b0754f13093b47a67a9b4c25676c5b419c257b70fc90a77a3384" Oct 11 04:14:33 crc kubenswrapper[4798]: E1011 
04:14:33.252911 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b501c2408606b0754f13093b47a67a9b4c25676c5b419c257b70fc90a77a3384\": container with ID starting with b501c2408606b0754f13093b47a67a9b4c25676c5b419c257b70fc90a77a3384 not found: ID does not exist" containerID="b501c2408606b0754f13093b47a67a9b4c25676c5b419c257b70fc90a77a3384" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.252941 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b501c2408606b0754f13093b47a67a9b4c25676c5b419c257b70fc90a77a3384"} err="failed to get container status \"b501c2408606b0754f13093b47a67a9b4c25676c5b419c257b70fc90a77a3384\": rpc error: code = NotFound desc = could not find container \"b501c2408606b0754f13093b47a67a9b4c25676c5b419c257b70fc90a77a3384\": container with ID starting with b501c2408606b0754f13093b47a67a9b4c25676c5b419c257b70fc90a77a3384 not found: ID does not exist" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.253664 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.256815 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.257035 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.259693 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:14:33 crc kubenswrapper[4798]: E1011 04:14:33.289127 4798 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 11 04:14:33 crc kubenswrapper[4798]: E1011 04:14:33.290969 4798 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 11 04:14:33 crc kubenswrapper[4798]: E1011 04:14:33.292273 4798 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 11 04:14:33 crc kubenswrapper[4798]: E1011 04:14:33.292378 4798 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="17db9048-13f8-4d58-9098-c6f8ea322009" containerName="nova-scheduler-scheduler" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.424898 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9fa916be-a581-493a-ade0-f5683c1d31e1-nova-metadata-tls-certs\") pod \"nova-metadata-0\" 
(UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.424944 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9fa916be-a581-493a-ade0-f5683c1d31e1-config-data\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.424994 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9fa916be-a581-493a-ade0-f5683c1d31e1-logs\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.425238 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7542\" (UniqueName: \"kubernetes.io/projected/9fa916be-a581-493a-ade0-f5683c1d31e1-kube-api-access-c7542\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.425347 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fa916be-a581-493a-ade0-f5683c1d31e1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.437274 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8048fd5b-4d35-45d2-8120-233af208d713" path="/var/lib/kubelet/pods/8048fd5b-4d35-45d2-8120-233af208d713/volumes" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.538445 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fa916be-a581-493a-ade0-f5683c1d31e1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.538711 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9fa916be-a581-493a-ade0-f5683c1d31e1-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.538763 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9fa916be-a581-493a-ade0-f5683c1d31e1-config-data\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.538888 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9fa916be-a581-493a-ade0-f5683c1d31e1-logs\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.538970 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7542\" (UniqueName: 
\"kubernetes.io/projected/9fa916be-a581-493a-ade0-f5683c1d31e1-kube-api-access-c7542\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.541530 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9fa916be-a581-493a-ade0-f5683c1d31e1-logs\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.545364 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9fa916be-a581-493a-ade0-f5683c1d31e1-config-data\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.545655 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fa916be-a581-493a-ade0-f5683c1d31e1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.546122 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9fa916be-a581-493a-ade0-f5683c1d31e1-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.556828 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7542\" (UniqueName: \"kubernetes.io/projected/9fa916be-a581-493a-ade0-f5683c1d31e1-kube-api-access-c7542\") pod \"nova-metadata-0\" (UID: \"9fa916be-a581-493a-ade0-f5683c1d31e1\") " pod="openstack/nova-metadata-0" Oct 11 04:14:33 crc kubenswrapper[4798]: I1011 04:14:33.580126 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 04:14:34 crc kubenswrapper[4798]: I1011 04:14:34.036480 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 04:14:34 crc kubenswrapper[4798]: W1011 04:14:34.042666 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9fa916be_a581_493a_ade0_f5683c1d31e1.slice/crio-6599309fec3758a73544db7bfc4861c1229f239e8bf5b39b60b9b0a62b27a7e2 WatchSource:0}: Error finding container 6599309fec3758a73544db7bfc4861c1229f239e8bf5b39b60b9b0a62b27a7e2: Status 404 returned error can't find the container with id 6599309fec3758a73544db7bfc4861c1229f239e8bf5b39b60b9b0a62b27a7e2 Oct 11 04:14:34 crc kubenswrapper[4798]: I1011 04:14:34.218196 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9fa916be-a581-493a-ade0-f5683c1d31e1","Type":"ContainerStarted","Data":"6599309fec3758a73544db7bfc4861c1229f239e8bf5b39b60b9b0a62b27a7e2"} Oct 11 04:14:35 crc kubenswrapper[4798]: I1011 04:14:35.233899 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9fa916be-a581-493a-ade0-f5683c1d31e1","Type":"ContainerStarted","Data":"ce55f26dc31f7984670c3cbcec7e8e17dd2f2adc2956eb11c22a2089f4501f8d"} Oct 11 04:14:35 crc kubenswrapper[4798]: I1011 04:14:35.233960 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9fa916be-a581-493a-ade0-f5683c1d31e1","Type":"ContainerStarted","Data":"23f5dcf04e68fd36bd6544fa2e67a13869aee74ecc35b766dabfcf1a43ac6b92"} Oct 11 04:14:35 crc kubenswrapper[4798]: I1011 04:14:35.256050 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.256029288 podStartE2EDuration="2.256029288s" podCreationTimestamp="2025-10-11 04:14:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:14:35.2532008 +0000 UTC m=+1170.589490486" watchObservedRunningTime="2025-10-11 04:14:35.256029288 +0000 UTC m=+1170.592318974" Oct 11 04:14:35 crc kubenswrapper[4798]: I1011 04:14:35.961485 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.097425 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5lpp\" (UniqueName: \"kubernetes.io/projected/17db9048-13f8-4d58-9098-c6f8ea322009-kube-api-access-v5lpp\") pod \"17db9048-13f8-4d58-9098-c6f8ea322009\" (UID: \"17db9048-13f8-4d58-9098-c6f8ea322009\") " Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.097475 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17db9048-13f8-4d58-9098-c6f8ea322009-combined-ca-bundle\") pod \"17db9048-13f8-4d58-9098-c6f8ea322009\" (UID: \"17db9048-13f8-4d58-9098-c6f8ea322009\") " Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.097676 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17db9048-13f8-4d58-9098-c6f8ea322009-config-data\") pod \"17db9048-13f8-4d58-9098-c6f8ea322009\" (UID: \"17db9048-13f8-4d58-9098-c6f8ea322009\") " Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.108368 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17db9048-13f8-4d58-9098-c6f8ea322009-kube-api-access-v5lpp" (OuterVolumeSpecName: "kube-api-access-v5lpp") pod "17db9048-13f8-4d58-9098-c6f8ea322009" (UID: "17db9048-13f8-4d58-9098-c6f8ea322009"). InnerVolumeSpecName "kube-api-access-v5lpp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.133040 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17db9048-13f8-4d58-9098-c6f8ea322009-config-data" (OuterVolumeSpecName: "config-data") pod "17db9048-13f8-4d58-9098-c6f8ea322009" (UID: "17db9048-13f8-4d58-9098-c6f8ea322009"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.137002 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17db9048-13f8-4d58-9098-c6f8ea322009-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "17db9048-13f8-4d58-9098-c6f8ea322009" (UID: "17db9048-13f8-4d58-9098-c6f8ea322009"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.201527 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17db9048-13f8-4d58-9098-c6f8ea322009-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.201577 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5lpp\" (UniqueName: \"kubernetes.io/projected/17db9048-13f8-4d58-9098-c6f8ea322009-kube-api-access-v5lpp\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.201599 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17db9048-13f8-4d58-9098-c6f8ea322009-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.242557 4798 generic.go:334] "Generic (PLEG): container finished" podID="17db9048-13f8-4d58-9098-c6f8ea322009" containerID="895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a" exitCode=0 Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.243661 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.244087 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"17db9048-13f8-4d58-9098-c6f8ea322009","Type":"ContainerDied","Data":"895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a"} Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.244193 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"17db9048-13f8-4d58-9098-c6f8ea322009","Type":"ContainerDied","Data":"a2556efb1e9a41547c5054d5a7fd50d2428e7d049e15d97787275540218c1377"} Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.244269 4798 scope.go:117] "RemoveContainer" containerID="895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.276346 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.285182 4798 scope.go:117] "RemoveContainer" containerID="895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a" Oct 11 04:14:36 crc kubenswrapper[4798]: E1011 04:14:36.286373 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a\": container with ID starting with 895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a not found: ID does not exist" containerID="895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.286466 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a"} err="failed to get container status \"895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a\": rpc error: code = NotFound desc = could not find container \"895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a\": container with ID starting with 895b81559b2feee638af88ea26d014f217644c8ef6058e98aada6c3d9fba444a not found: ID does not exist" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.287584 4798 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.301897 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 04:14:36 crc kubenswrapper[4798]: E1011 04:14:36.303152 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17db9048-13f8-4d58-9098-c6f8ea322009" containerName="nova-scheduler-scheduler" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.303279 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="17db9048-13f8-4d58-9098-c6f8ea322009" containerName="nova-scheduler-scheduler" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.303630 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="17db9048-13f8-4d58-9098-c6f8ea322009" containerName="nova-scheduler-scheduler" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.305342 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.309022 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.313252 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.507859 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88bd4a12-3afa-47ca-8954-e83a63f635d9-config-data\") pod \"nova-scheduler-0\" (UID: \"88bd4a12-3afa-47ca-8954-e83a63f635d9\") " pod="openstack/nova-scheduler-0" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.508281 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dg2g4\" (UniqueName: \"kubernetes.io/projected/88bd4a12-3afa-47ca-8954-e83a63f635d9-kube-api-access-dg2g4\") pod \"nova-scheduler-0\" (UID: \"88bd4a12-3afa-47ca-8954-e83a63f635d9\") " pod="openstack/nova-scheduler-0" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.508910 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88bd4a12-3afa-47ca-8954-e83a63f635d9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"88bd4a12-3afa-47ca-8954-e83a63f635d9\") " pod="openstack/nova-scheduler-0" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.611298 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88bd4a12-3afa-47ca-8954-e83a63f635d9-config-data\") pod \"nova-scheduler-0\" (UID: \"88bd4a12-3afa-47ca-8954-e83a63f635d9\") " pod="openstack/nova-scheduler-0" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.611953 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dg2g4\" (UniqueName: \"kubernetes.io/projected/88bd4a12-3afa-47ca-8954-e83a63f635d9-kube-api-access-dg2g4\") pod \"nova-scheduler-0\" (UID: \"88bd4a12-3afa-47ca-8954-e83a63f635d9\") " pod="openstack/nova-scheduler-0" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.612057 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88bd4a12-3afa-47ca-8954-e83a63f635d9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: 
\"88bd4a12-3afa-47ca-8954-e83a63f635d9\") " pod="openstack/nova-scheduler-0" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.615952 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88bd4a12-3afa-47ca-8954-e83a63f635d9-config-data\") pod \"nova-scheduler-0\" (UID: \"88bd4a12-3afa-47ca-8954-e83a63f635d9\") " pod="openstack/nova-scheduler-0" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.616512 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88bd4a12-3afa-47ca-8954-e83a63f635d9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"88bd4a12-3afa-47ca-8954-e83a63f635d9\") " pod="openstack/nova-scheduler-0" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.628818 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dg2g4\" (UniqueName: \"kubernetes.io/projected/88bd4a12-3afa-47ca-8954-e83a63f635d9-kube-api-access-dg2g4\") pod \"nova-scheduler-0\" (UID: \"88bd4a12-3afa-47ca-8954-e83a63f635d9\") " pod="openstack/nova-scheduler-0" Oct 11 04:14:36 crc kubenswrapper[4798]: I1011 04:14:36.926089 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 04:14:37 crc kubenswrapper[4798]: I1011 04:14:37.396726 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 04:14:37 crc kubenswrapper[4798]: I1011 04:14:37.434829 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17db9048-13f8-4d58-9098-c6f8ea322009" path="/var/lib/kubelet/pods/17db9048-13f8-4d58-9098-c6f8ea322009/volumes" Oct 11 04:14:38 crc kubenswrapper[4798]: I1011 04:14:38.269195 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"88bd4a12-3afa-47ca-8954-e83a63f635d9","Type":"ContainerStarted","Data":"408f9b89b934f942054f30fca53fb8d5051cfff016721108601d7f22823f268f"} Oct 11 04:14:38 crc kubenswrapper[4798]: I1011 04:14:38.269741 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"88bd4a12-3afa-47ca-8954-e83a63f635d9","Type":"ContainerStarted","Data":"8bde779e4006074867d4c230d2e9811f36c581df88f0c48f3321cb19dba341c0"} Oct 11 04:14:38 crc kubenswrapper[4798]: I1011 04:14:38.288043 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.288009748 podStartE2EDuration="2.288009748s" podCreationTimestamp="2025-10-11 04:14:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:14:38.286238886 +0000 UTC m=+1173.622528572" watchObservedRunningTime="2025-10-11 04:14:38.288009748 +0000 UTC m=+1173.624299464" Oct 11 04:14:38 crc kubenswrapper[4798]: I1011 04:14:38.581040 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 11 04:14:38 crc kubenswrapper[4798]: I1011 04:14:38.581101 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 11 04:14:40 crc kubenswrapper[4798]: I1011 04:14:40.551031 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 11 04:14:40 crc kubenswrapper[4798]: I1011 04:14:40.551667 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/nova-api-0" Oct 11 04:14:41 crc kubenswrapper[4798]: I1011 04:14:41.563951 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3498eff7-9fc6-42de-ab90-0df1ab533041" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.186:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 11 04:14:41 crc kubenswrapper[4798]: I1011 04:14:41.564377 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3498eff7-9fc6-42de-ab90-0df1ab533041" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.186:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 11 04:14:41 crc kubenswrapper[4798]: I1011 04:14:41.926429 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Oct 11 04:14:43 crc kubenswrapper[4798]: I1011 04:14:43.588578 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 11 04:14:43 crc kubenswrapper[4798]: I1011 04:14:43.589903 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 11 04:14:44 crc kubenswrapper[4798]: I1011 04:14:44.601649 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9fa916be-a581-493a-ade0-f5683c1d31e1" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.187:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 11 04:14:44 crc kubenswrapper[4798]: I1011 04:14:44.601686 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9fa916be-a581-493a-ade0-f5683c1d31e1" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.187:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 11 04:14:46 crc kubenswrapper[4798]: I1011 04:14:46.927128 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Oct 11 04:14:46 crc kubenswrapper[4798]: I1011 04:14:46.966272 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Oct 11 04:14:47 crc kubenswrapper[4798]: I1011 04:14:47.319548 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Oct 11 04:14:47 crc kubenswrapper[4798]: I1011 04:14:47.416682 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Oct 11 04:14:50 crc kubenswrapper[4798]: I1011 04:14:50.557465 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 11 04:14:50 crc kubenswrapper[4798]: I1011 04:14:50.558254 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 11 04:14:50 crc kubenswrapper[4798]: I1011 04:14:50.561477 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 11 04:14:50 crc kubenswrapper[4798]: I1011 04:14:50.564031 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 11 04:14:51 crc kubenswrapper[4798]: I1011 04:14:51.442783 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 11 04:14:51 crc kubenswrapper[4798]: I1011 
04:14:51.442971 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 11 04:14:53 crc kubenswrapper[4798]: I1011 04:14:53.588028 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 11 04:14:53 crc kubenswrapper[4798]: I1011 04:14:53.588444 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 11 04:14:53 crc kubenswrapper[4798]: I1011 04:14:53.595491 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 11 04:14:53 crc kubenswrapper[4798]: I1011 04:14:53.596589 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.151154 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt"] Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.152996 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.154930 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.155149 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.179508 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt"] Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.335343 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9a510113-4967-4bc5-81db-405d51a75250-secret-volume\") pod \"collect-profiles-29335935-k7lwt\" (UID: \"9a510113-4967-4bc5-81db-405d51a75250\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.335475 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a510113-4967-4bc5-81db-405d51a75250-config-volume\") pod \"collect-profiles-29335935-k7lwt\" (UID: \"9a510113-4967-4bc5-81db-405d51a75250\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.335587 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-679bz\" (UniqueName: \"kubernetes.io/projected/9a510113-4967-4bc5-81db-405d51a75250-kube-api-access-679bz\") pod \"collect-profiles-29335935-k7lwt\" (UID: \"9a510113-4967-4bc5-81db-405d51a75250\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.437625 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-679bz\" (UniqueName: \"kubernetes.io/projected/9a510113-4967-4bc5-81db-405d51a75250-kube-api-access-679bz\") pod \"collect-profiles-29335935-k7lwt\" (UID: \"9a510113-4967-4bc5-81db-405d51a75250\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.437716 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9a510113-4967-4bc5-81db-405d51a75250-secret-volume\") pod \"collect-profiles-29335935-k7lwt\" (UID: \"9a510113-4967-4bc5-81db-405d51a75250\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.437770 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a510113-4967-4bc5-81db-405d51a75250-config-volume\") pod \"collect-profiles-29335935-k7lwt\" (UID: \"9a510113-4967-4bc5-81db-405d51a75250\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.438636 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a510113-4967-4bc5-81db-405d51a75250-config-volume\") pod \"collect-profiles-29335935-k7lwt\" (UID: \"9a510113-4967-4bc5-81db-405d51a75250\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.447611 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9a510113-4967-4bc5-81db-405d51a75250-secret-volume\") pod \"collect-profiles-29335935-k7lwt\" (UID: \"9a510113-4967-4bc5-81db-405d51a75250\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.454810 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-679bz\" (UniqueName: \"kubernetes.io/projected/9a510113-4967-4bc5-81db-405d51a75250-kube-api-access-679bz\") pod \"collect-profiles-29335935-k7lwt\" (UID: \"9a510113-4967-4bc5-81db-405d51a75250\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.477729 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" Oct 11 04:15:00 crc kubenswrapper[4798]: I1011 04:15:00.953139 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt"] Oct 11 04:15:01 crc kubenswrapper[4798]: I1011 04:15:01.421870 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 04:15:01 crc kubenswrapper[4798]: I1011 04:15:01.540575 4798 generic.go:334] "Generic (PLEG): container finished" podID="9a510113-4967-4bc5-81db-405d51a75250" containerID="af3930905326c3b9f080dfe11c73fec17f94806f2a97ac8659fcef5d8f14f5b2" exitCode=0 Oct 11 04:15:01 crc kubenswrapper[4798]: I1011 04:15:01.541013 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" event={"ID":"9a510113-4967-4bc5-81db-405d51a75250","Type":"ContainerDied","Data":"af3930905326c3b9f080dfe11c73fec17f94806f2a97ac8659fcef5d8f14f5b2"} Oct 11 04:15:01 crc kubenswrapper[4798]: I1011 04:15:01.541047 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" event={"ID":"9a510113-4967-4bc5-81db-405d51a75250","Type":"ContainerStarted","Data":"0bdc5af6171e14a40e270e26201a985a0fad407660610f9cf589744594c6ffdf"} Oct 11 04:15:02 crc kubenswrapper[4798]: I1011 04:15:02.433037 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 04:15:02 crc kubenswrapper[4798]: I1011 04:15:02.924568 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" Oct 11 04:15:03 crc kubenswrapper[4798]: I1011 04:15:03.093589 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-679bz\" (UniqueName: \"kubernetes.io/projected/9a510113-4967-4bc5-81db-405d51a75250-kube-api-access-679bz\") pod \"9a510113-4967-4bc5-81db-405d51a75250\" (UID: \"9a510113-4967-4bc5-81db-405d51a75250\") " Oct 11 04:15:03 crc kubenswrapper[4798]: I1011 04:15:03.093811 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a510113-4967-4bc5-81db-405d51a75250-config-volume\") pod \"9a510113-4967-4bc5-81db-405d51a75250\" (UID: \"9a510113-4967-4bc5-81db-405d51a75250\") " Oct 11 04:15:03 crc kubenswrapper[4798]: I1011 04:15:03.093904 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9a510113-4967-4bc5-81db-405d51a75250-secret-volume\") pod \"9a510113-4967-4bc5-81db-405d51a75250\" (UID: \"9a510113-4967-4bc5-81db-405d51a75250\") " Oct 11 04:15:03 crc kubenswrapper[4798]: I1011 04:15:03.094872 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a510113-4967-4bc5-81db-405d51a75250-config-volume" (OuterVolumeSpecName: "config-volume") pod "9a510113-4967-4bc5-81db-405d51a75250" (UID: "9a510113-4967-4bc5-81db-405d51a75250"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:03 crc kubenswrapper[4798]: I1011 04:15:03.103926 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a510113-4967-4bc5-81db-405d51a75250-kube-api-access-679bz" (OuterVolumeSpecName: "kube-api-access-679bz") pod "9a510113-4967-4bc5-81db-405d51a75250" (UID: "9a510113-4967-4bc5-81db-405d51a75250"). InnerVolumeSpecName "kube-api-access-679bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:15:03 crc kubenswrapper[4798]: I1011 04:15:03.114294 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a510113-4967-4bc5-81db-405d51a75250-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9a510113-4967-4bc5-81db-405d51a75250" (UID: "9a510113-4967-4bc5-81db-405d51a75250"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:15:03 crc kubenswrapper[4798]: I1011 04:15:03.196830 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-679bz\" (UniqueName: \"kubernetes.io/projected/9a510113-4967-4bc5-81db-405d51a75250-kube-api-access-679bz\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:03 crc kubenswrapper[4798]: I1011 04:15:03.196871 4798 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a510113-4967-4bc5-81db-405d51a75250-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:03 crc kubenswrapper[4798]: I1011 04:15:03.196883 4798 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9a510113-4967-4bc5-81db-405d51a75250-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:03 crc kubenswrapper[4798]: I1011 04:15:03.560424 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" event={"ID":"9a510113-4967-4bc5-81db-405d51a75250","Type":"ContainerDied","Data":"0bdc5af6171e14a40e270e26201a985a0fad407660610f9cf589744594c6ffdf"} Oct 11 04:15:03 crc kubenswrapper[4798]: I1011 04:15:03.560898 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0bdc5af6171e14a40e270e26201a985a0fad407660610f9cf589744594c6ffdf" Oct 11 04:15:03 crc kubenswrapper[4798]: I1011 04:15:03.560503 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt" Oct 11 04:15:06 crc kubenswrapper[4798]: I1011 04:15:06.009208 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="eff19131-d13b-495f-94a3-097e030fd3cd" containerName="rabbitmq" containerID="cri-o://e4ce79436847e4997249775c35dd097ae6234543ccd06b3be91fee95ce038ee8" gracePeriod=604796 Oct 11 04:15:06 crc kubenswrapper[4798]: I1011 04:15:06.587854 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="eff19131-d13b-495f-94a3-097e030fd3cd" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused" Oct 11 04:15:06 crc kubenswrapper[4798]: I1011 04:15:06.779059 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="f7b18bbb-aead-4356-99b3-f1ee253f86e8" containerName="rabbitmq" containerID="cri-o://faf08274c494cd3fe463ce8dd317d209c1bb6c482204375c1aa72645d1c00b9b" gracePeriod=604796 Oct 11 04:15:06 crc kubenswrapper[4798]: I1011 04:15:06.950868 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="f7b18bbb-aead-4356-99b3-f1ee253f86e8" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.575892 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.595815 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"eff19131-d13b-495f-94a3-097e030fd3cd\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.595876 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-config-data\") pod \"eff19131-d13b-495f-94a3-097e030fd3cd\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.595968 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-confd\") pod \"eff19131-d13b-495f-94a3-097e030fd3cd\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.596002 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-plugins\") pod \"eff19131-d13b-495f-94a3-097e030fd3cd\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.596045 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjb9j\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-kube-api-access-wjb9j\") pod \"eff19131-d13b-495f-94a3-097e030fd3cd\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.596078 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/eff19131-d13b-495f-94a3-097e030fd3cd-erlang-cookie-secret\") pod \"eff19131-d13b-495f-94a3-097e030fd3cd\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.596109 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-plugins-conf\") pod \"eff19131-d13b-495f-94a3-097e030fd3cd\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.596157 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/eff19131-d13b-495f-94a3-097e030fd3cd-pod-info\") pod \"eff19131-d13b-495f-94a3-097e030fd3cd\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.596194 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-server-conf\") pod \"eff19131-d13b-495f-94a3-097e030fd3cd\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.596245 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-erlang-cookie\") pod \"eff19131-d13b-495f-94a3-097e030fd3cd\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.596289 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-tls\") pod \"eff19131-d13b-495f-94a3-097e030fd3cd\" (UID: \"eff19131-d13b-495f-94a3-097e030fd3cd\") " Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.605581 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "eff19131-d13b-495f-94a3-097e030fd3cd" (UID: "eff19131-d13b-495f-94a3-097e030fd3cd"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.606387 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/eff19131-d13b-495f-94a3-097e030fd3cd-pod-info" (OuterVolumeSpecName: "pod-info") pod "eff19131-d13b-495f-94a3-097e030fd3cd" (UID: "eff19131-d13b-495f-94a3-097e030fd3cd"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.608630 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "eff19131-d13b-495f-94a3-097e030fd3cd" (UID: "eff19131-d13b-495f-94a3-097e030fd3cd"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.612106 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "eff19131-d13b-495f-94a3-097e030fd3cd" (UID: "eff19131-d13b-495f-94a3-097e030fd3cd"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.612343 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "eff19131-d13b-495f-94a3-097e030fd3cd" (UID: "eff19131-d13b-495f-94a3-097e030fd3cd"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.612992 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eff19131-d13b-495f-94a3-097e030fd3cd-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "eff19131-d13b-495f-94a3-097e030fd3cd" (UID: "eff19131-d13b-495f-94a3-097e030fd3cd"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.613512 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "eff19131-d13b-495f-94a3-097e030fd3cd" (UID: "eff19131-d13b-495f-94a3-097e030fd3cd"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.639704 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-config-data" (OuterVolumeSpecName: "config-data") pod "eff19131-d13b-495f-94a3-097e030fd3cd" (UID: "eff19131-d13b-495f-94a3-097e030fd3cd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.643654 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-kube-api-access-wjb9j" (OuterVolumeSpecName: "kube-api-access-wjb9j") pod "eff19131-d13b-495f-94a3-097e030fd3cd" (UID: "eff19131-d13b-495f-94a3-097e030fd3cd"). InnerVolumeSpecName "kube-api-access-wjb9j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.667281 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-server-conf" (OuterVolumeSpecName: "server-conf") pod "eff19131-d13b-495f-94a3-097e030fd3cd" (UID: "eff19131-d13b-495f-94a3-097e030fd3cd"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.667560 4798 generic.go:334] "Generic (PLEG): container finished" podID="eff19131-d13b-495f-94a3-097e030fd3cd" containerID="e4ce79436847e4997249775c35dd097ae6234543ccd06b3be91fee95ce038ee8" exitCode=0 Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.667611 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"eff19131-d13b-495f-94a3-097e030fd3cd","Type":"ContainerDied","Data":"e4ce79436847e4997249775c35dd097ae6234543ccd06b3be91fee95ce038ee8"} Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.667642 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"eff19131-d13b-495f-94a3-097e030fd3cd","Type":"ContainerDied","Data":"f5cf56969a3e007e59fdcf2b0d2eec2da5d53d593d125796e2a8c0050bfe6934"} Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.667661 4798 scope.go:117] "RemoveContainer" containerID="e4ce79436847e4997249775c35dd097ae6234543ccd06b3be91fee95ce038ee8" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.667862 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.702022 4798 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.702086 4798 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.702097 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.702106 4798 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.702117 4798 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/eff19131-d13b-495f-94a3-097e030fd3cd-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.702127 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjb9j\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-kube-api-access-wjb9j\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.702137 4798 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-plugins-conf\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.702144 4798 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/eff19131-d13b-495f-94a3-097e030fd3cd-pod-info\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.702153 4798 reconciler_common.go:293] "Volume detached for volume \"server-conf\" 
(UniqueName: \"kubernetes.io/configmap/eff19131-d13b-495f-94a3-097e030fd3cd-server-conf\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.702164 4798 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.736270 4798 scope.go:117] "RemoveContainer" containerID="030275056fe2ac998fca1a9e12a7334f798986e02b40dd3ccf746ef1ee118864" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.751065 4798 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.767802 4798 scope.go:117] "RemoveContainer" containerID="e4ce79436847e4997249775c35dd097ae6234543ccd06b3be91fee95ce038ee8" Oct 11 04:15:12 crc kubenswrapper[4798]: E1011 04:15:12.768241 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4ce79436847e4997249775c35dd097ae6234543ccd06b3be91fee95ce038ee8\": container with ID starting with e4ce79436847e4997249775c35dd097ae6234543ccd06b3be91fee95ce038ee8 not found: ID does not exist" containerID="e4ce79436847e4997249775c35dd097ae6234543ccd06b3be91fee95ce038ee8" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.768298 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4ce79436847e4997249775c35dd097ae6234543ccd06b3be91fee95ce038ee8"} err="failed to get container status \"e4ce79436847e4997249775c35dd097ae6234543ccd06b3be91fee95ce038ee8\": rpc error: code = NotFound desc = could not find container \"e4ce79436847e4997249775c35dd097ae6234543ccd06b3be91fee95ce038ee8\": container with ID starting with e4ce79436847e4997249775c35dd097ae6234543ccd06b3be91fee95ce038ee8 not found: ID does not exist" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.768336 4798 scope.go:117] "RemoveContainer" containerID="030275056fe2ac998fca1a9e12a7334f798986e02b40dd3ccf746ef1ee118864" Oct 11 04:15:12 crc kubenswrapper[4798]: E1011 04:15:12.768804 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"030275056fe2ac998fca1a9e12a7334f798986e02b40dd3ccf746ef1ee118864\": container with ID starting with 030275056fe2ac998fca1a9e12a7334f798986e02b40dd3ccf746ef1ee118864 not found: ID does not exist" containerID="030275056fe2ac998fca1a9e12a7334f798986e02b40dd3ccf746ef1ee118864" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.768839 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"030275056fe2ac998fca1a9e12a7334f798986e02b40dd3ccf746ef1ee118864"} err="failed to get container status \"030275056fe2ac998fca1a9e12a7334f798986e02b40dd3ccf746ef1ee118864\": rpc error: code = NotFound desc = could not find container \"030275056fe2ac998fca1a9e12a7334f798986e02b40dd3ccf746ef1ee118864\": container with ID starting with 030275056fe2ac998fca1a9e12a7334f798986e02b40dd3ccf746ef1ee118864 not found: ID does not exist" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.806975 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod 
"eff19131-d13b-495f-94a3-097e030fd3cd" (UID: "eff19131-d13b-495f-94a3-097e030fd3cd"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.808895 4798 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/eff19131-d13b-495f-94a3-097e030fd3cd-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:12 crc kubenswrapper[4798]: I1011 04:15:12.808947 4798 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.041657 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.079539 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.085477 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 04:15:13 crc kubenswrapper[4798]: E1011 04:15:13.085931 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a510113-4967-4bc5-81db-405d51a75250" containerName="collect-profiles" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.085952 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a510113-4967-4bc5-81db-405d51a75250" containerName="collect-profiles" Oct 11 04:15:13 crc kubenswrapper[4798]: E1011 04:15:13.085962 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eff19131-d13b-495f-94a3-097e030fd3cd" containerName="rabbitmq" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.085968 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="eff19131-d13b-495f-94a3-097e030fd3cd" containerName="rabbitmq" Oct 11 04:15:13 crc kubenswrapper[4798]: E1011 04:15:13.086008 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eff19131-d13b-495f-94a3-097e030fd3cd" containerName="setup-container" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.086015 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="eff19131-d13b-495f-94a3-097e030fd3cd" containerName="setup-container" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.086183 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a510113-4967-4bc5-81db-405d51a75250" containerName="collect-profiles" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.086211 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="eff19131-d13b-495f-94a3-097e030fd3cd" containerName="rabbitmq" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.087252 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.092171 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.093160 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-52nsp" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.093493 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.093685 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.093951 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.094155 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.094333 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.105532 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.230966 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.231048 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.231078 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.231108 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndz6p\" (UniqueName: \"kubernetes.io/projected/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-kube-api-access-ndz6p\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.231129 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.231152 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-config-data\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.231171 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.231198 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.231217 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.231237 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.231408 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.333888 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.334517 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.334633 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.334677 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 
04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.334731 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndz6p\" (UniqueName: \"kubernetes.io/projected/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-kube-api-access-ndz6p\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.334763 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.334791 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-config-data\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.334827 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.334892 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.334931 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.334966 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.334193 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.336635 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.336877 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-config-data\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.337039 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.337810 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.344595 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.346702 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.347827 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.362527 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.366370 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.369335 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndz6p\" (UniqueName: \"kubernetes.io/projected/c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe-kube-api-access-ndz6p\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.415916 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe\") " pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.432458 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.438919 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eff19131-d13b-495f-94a3-097e030fd3cd" path="/var/lib/kubelet/pods/eff19131-d13b-495f-94a3-097e030fd3cd/volumes" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.519502 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.642238 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f7b18bbb-aead-4356-99b3-f1ee253f86e8-erlang-cookie-secret\") pod \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.646168 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-tls\") pod \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.646254 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-confd\") pod \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.646333 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-server-conf\") pod \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.646367 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-config-data\") pod \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.646402 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-plugins-conf\") pod \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.646458 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-erlang-cookie\") pod \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.646478 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f7b18bbb-aead-4356-99b3-f1ee253f86e8-pod-info\") pod \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.646518 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-plugins\") pod \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.646560 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-57jnv\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-kube-api-access-57jnv\") pod \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.646643 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\" (UID: \"f7b18bbb-aead-4356-99b3-f1ee253f86e8\") " Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.648152 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "f7b18bbb-aead-4356-99b3-f1ee253f86e8" (UID: "f7b18bbb-aead-4356-99b3-f1ee253f86e8"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.649376 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "f7b18bbb-aead-4356-99b3-f1ee253f86e8" (UID: "f7b18bbb-aead-4356-99b3-f1ee253f86e8"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.650464 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "f7b18bbb-aead-4356-99b3-f1ee253f86e8" (UID: "f7b18bbb-aead-4356-99b3-f1ee253f86e8"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.651468 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7b18bbb-aead-4356-99b3-f1ee253f86e8-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "f7b18bbb-aead-4356-99b3-f1ee253f86e8" (UID: "f7b18bbb-aead-4356-99b3-f1ee253f86e8"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.655203 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "persistence") pod "f7b18bbb-aead-4356-99b3-f1ee253f86e8" (UID: "f7b18bbb-aead-4356-99b3-f1ee253f86e8"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.655456 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "f7b18bbb-aead-4356-99b3-f1ee253f86e8" (UID: "f7b18bbb-aead-4356-99b3-f1ee253f86e8"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.655893 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-kube-api-access-57jnv" (OuterVolumeSpecName: "kube-api-access-57jnv") pod "f7b18bbb-aead-4356-99b3-f1ee253f86e8" (UID: "f7b18bbb-aead-4356-99b3-f1ee253f86e8"). InnerVolumeSpecName "kube-api-access-57jnv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.656167 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/f7b18bbb-aead-4356-99b3-f1ee253f86e8-pod-info" (OuterVolumeSpecName: "pod-info") pod "f7b18bbb-aead-4356-99b3-f1ee253f86e8" (UID: "f7b18bbb-aead-4356-99b3-f1ee253f86e8"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.697547 4798 generic.go:334] "Generic (PLEG): container finished" podID="f7b18bbb-aead-4356-99b3-f1ee253f86e8" containerID="faf08274c494cd3fe463ce8dd317d209c1bb6c482204375c1aa72645d1c00b9b" exitCode=0 Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.697593 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f7b18bbb-aead-4356-99b3-f1ee253f86e8","Type":"ContainerDied","Data":"faf08274c494cd3fe463ce8dd317d209c1bb6c482204375c1aa72645d1c00b9b"} Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.697618 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f7b18bbb-aead-4356-99b3-f1ee253f86e8","Type":"ContainerDied","Data":"5533a9c8bb97352dfddb862076432a6842f135c5df8aa24bba20d72a136c5271"} Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.697637 4798 scope.go:117] "RemoveContainer" containerID="faf08274c494cd3fe463ce8dd317d209c1bb6c482204375c1aa72645d1c00b9b" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.697763 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.720667 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-server-conf" (OuterVolumeSpecName: "server-conf") pod "f7b18bbb-aead-4356-99b3-f1ee253f86e8" (UID: "f7b18bbb-aead-4356-99b3-f1ee253f86e8"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.725012 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-config-data" (OuterVolumeSpecName: "config-data") pod "f7b18bbb-aead-4356-99b3-f1ee253f86e8" (UID: "f7b18bbb-aead-4356-99b3-f1ee253f86e8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.749592 4798 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-server-conf\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.749642 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.749654 4798 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f7b18bbb-aead-4356-99b3-f1ee253f86e8-plugins-conf\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.749669 4798 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.749682 4798 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f7b18bbb-aead-4356-99b3-f1ee253f86e8-pod-info\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.749691 4798 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.749703 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-57jnv\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-kube-api-access-57jnv\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.749742 4798 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.749752 4798 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f7b18bbb-aead-4356-99b3-f1ee253f86e8-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.749761 4798 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.765670 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "f7b18bbb-aead-4356-99b3-f1ee253f86e8" (UID: "f7b18bbb-aead-4356-99b3-f1ee253f86e8"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.779212 4798 scope.go:117] "RemoveContainer" containerID="b460d449f79e7f0722844afeca28dbfdb51a6a5f706ded8dadd5bf0066b6d520" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.787232 4798 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.806218 4798 scope.go:117] "RemoveContainer" containerID="faf08274c494cd3fe463ce8dd317d209c1bb6c482204375c1aa72645d1c00b9b" Oct 11 04:15:13 crc kubenswrapper[4798]: E1011 04:15:13.809050 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"faf08274c494cd3fe463ce8dd317d209c1bb6c482204375c1aa72645d1c00b9b\": container with ID starting with faf08274c494cd3fe463ce8dd317d209c1bb6c482204375c1aa72645d1c00b9b not found: ID does not exist" containerID="faf08274c494cd3fe463ce8dd317d209c1bb6c482204375c1aa72645d1c00b9b" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.809124 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"faf08274c494cd3fe463ce8dd317d209c1bb6c482204375c1aa72645d1c00b9b"} err="failed to get container status \"faf08274c494cd3fe463ce8dd317d209c1bb6c482204375c1aa72645d1c00b9b\": rpc error: code = NotFound desc = could not find container \"faf08274c494cd3fe463ce8dd317d209c1bb6c482204375c1aa72645d1c00b9b\": container with ID starting with faf08274c494cd3fe463ce8dd317d209c1bb6c482204375c1aa72645d1c00b9b not found: ID does not exist" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.809165 4798 scope.go:117] "RemoveContainer" containerID="b460d449f79e7f0722844afeca28dbfdb51a6a5f706ded8dadd5bf0066b6d520" Oct 11 04:15:13 crc kubenswrapper[4798]: E1011 04:15:13.809954 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b460d449f79e7f0722844afeca28dbfdb51a6a5f706ded8dadd5bf0066b6d520\": container with ID starting with b460d449f79e7f0722844afeca28dbfdb51a6a5f706ded8dadd5bf0066b6d520 not found: ID does not exist" containerID="b460d449f79e7f0722844afeca28dbfdb51a6a5f706ded8dadd5bf0066b6d520" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.809980 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b460d449f79e7f0722844afeca28dbfdb51a6a5f706ded8dadd5bf0066b6d520"} err="failed to get container status \"b460d449f79e7f0722844afeca28dbfdb51a6a5f706ded8dadd5bf0066b6d520\": rpc error: code = NotFound desc = could not find container \"b460d449f79e7f0722844afeca28dbfdb51a6a5f706ded8dadd5bf0066b6d520\": container with ID starting with b460d449f79e7f0722844afeca28dbfdb51a6a5f706ded8dadd5bf0066b6d520 not found: ID does not exist" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.852092 4798 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.852152 4798 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f7b18bbb-aead-4356-99b3-f1ee253f86e8-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:13 crc kubenswrapper[4798]: I1011 04:15:13.985183 4798 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.039943 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.050930 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.063997 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 04:15:14 crc kubenswrapper[4798]: E1011 04:15:14.064428 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b18bbb-aead-4356-99b3-f1ee253f86e8" containerName="rabbitmq" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.064442 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b18bbb-aead-4356-99b3-f1ee253f86e8" containerName="rabbitmq" Oct 11 04:15:14 crc kubenswrapper[4798]: E1011 04:15:14.064478 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b18bbb-aead-4356-99b3-f1ee253f86e8" containerName="setup-container" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.064487 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b18bbb-aead-4356-99b3-f1ee253f86e8" containerName="setup-container" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.064692 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7b18bbb-aead-4356-99b3-f1ee253f86e8" containerName="rabbitmq" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.065868 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.068480 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.069366 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.070295 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-qcx8n" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.071702 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.071989 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.072204 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.073563 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.082924 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.260219 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.260298 4798 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.260376 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.260427 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.260485 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr2l6\" (UniqueName: \"kubernetes.io/projected/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-kube-api-access-pr2l6\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.260538 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.260578 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.260609 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.260633 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.260652 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 
04:15:14.260677 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.361946 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.362015 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.362055 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.362114 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pr2l6\" (UniqueName: \"kubernetes.io/projected/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-kube-api-access-pr2l6\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.362166 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.362210 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.362239 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.362261 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.362279 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.362304 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.362347 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.362755 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.363114 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.363195 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.363983 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.364105 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.364181 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.368250 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " 
pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.368365 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.368755 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.370268 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.389498 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pr2l6\" (UniqueName: \"kubernetes.io/projected/cdfee9bc-39ae-48a4-9645-7191a2ae2cd5-kube-api-access-pr2l6\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.396683 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.401955 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.733805 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe","Type":"ContainerStarted","Data":"642e93307a5b02bee240d0b820948d402c67540837df7ba00cebf74dbf4309f8"} Oct 11 04:15:14 crc kubenswrapper[4798]: I1011 04:15:14.881697 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 04:15:15 crc kubenswrapper[4798]: W1011 04:15:15.001299 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcdfee9bc_39ae_48a4_9645_7191a2ae2cd5.slice/crio-7ec9a9656b6aec7262cf942ce4de45330d7e3164616c4eb2bc83cecd089e10bc WatchSource:0}: Error finding container 7ec9a9656b6aec7262cf942ce4de45330d7e3164616c4eb2bc83cecd089e10bc: Status 404 returned error can't find the container with id 7ec9a9656b6aec7262cf942ce4de45330d7e3164616c4eb2bc83cecd089e10bc Oct 11 04:15:15 crc kubenswrapper[4798]: I1011 04:15:15.444657 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7b18bbb-aead-4356-99b3-f1ee253f86e8" path="/var/lib/kubelet/pods/f7b18bbb-aead-4356-99b3-f1ee253f86e8/volumes" Oct 11 04:15:15 crc kubenswrapper[4798]: I1011 04:15:15.743784 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5","Type":"ContainerStarted","Data":"7ec9a9656b6aec7262cf942ce4de45330d7e3164616c4eb2bc83cecd089e10bc"} Oct 11 04:15:15 crc kubenswrapper[4798]: I1011 04:15:15.746722 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe","Type":"ContainerStarted","Data":"0d778cd278c428f53b7d93f074cbd3dd114fc9c23a125898cf315c89f44f3efa"} Oct 11 04:15:15 crc kubenswrapper[4798]: I1011 04:15:15.927448 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-s6rjn"] Oct 11 04:15:15 crc kubenswrapper[4798]: I1011 04:15:15.929160 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:15 crc kubenswrapper[4798]: I1011 04:15:15.932447 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Oct 11 04:15:15 crc kubenswrapper[4798]: I1011 04:15:15.940012 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-s6rjn"] Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.098030 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-openstack-edpm-ipam\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.098101 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-ovsdbserver-sb\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.098131 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-dns-svc\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.098243 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjf89\" (UniqueName: \"kubernetes.io/projected/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-kube-api-access-kjf89\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.098325 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-config\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.098352 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-ovsdbserver-nb\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.203902 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-openstack-edpm-ipam\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.204313 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-ovsdbserver-sb\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: 
\"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.204641 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-dns-svc\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.204814 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjf89\" (UniqueName: \"kubernetes.io/projected/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-kube-api-access-kjf89\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.205010 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-config\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.205121 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-ovsdbserver-nb\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.205262 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-ovsdbserver-sb\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.205299 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-dns-svc\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.206204 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-ovsdbserver-nb\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.206469 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-config\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.207550 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-openstack-edpm-ipam\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: 
I1011 04:15:16.232311 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjf89\" (UniqueName: \"kubernetes.io/projected/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-kube-api-access-kjf89\") pod \"dnsmasq-dns-578b8d767c-s6rjn\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.253849 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:16 crc kubenswrapper[4798]: I1011 04:15:16.771944 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-s6rjn"] Oct 11 04:15:17 crc kubenswrapper[4798]: I1011 04:15:17.780504 4798 generic.go:334] "Generic (PLEG): container finished" podID="ee0fdd8b-8eab-4bff-833a-92acacd45ea4" containerID="1ffb1fafab0d80c8351fc3eeb3cafa17815e58378dd7b7b5813ab1e85b741730" exitCode=0 Oct 11 04:15:17 crc kubenswrapper[4798]: I1011 04:15:17.780553 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" event={"ID":"ee0fdd8b-8eab-4bff-833a-92acacd45ea4","Type":"ContainerDied","Data":"1ffb1fafab0d80c8351fc3eeb3cafa17815e58378dd7b7b5813ab1e85b741730"} Oct 11 04:15:17 crc kubenswrapper[4798]: I1011 04:15:17.781304 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" event={"ID":"ee0fdd8b-8eab-4bff-833a-92acacd45ea4","Type":"ContainerStarted","Data":"2cf5def8440ced1d3fd62f54f49c9c3218c334a485b347a43efedec356d51975"} Oct 11 04:15:17 crc kubenswrapper[4798]: I1011 04:15:17.784975 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5","Type":"ContainerStarted","Data":"2e7b928dd4ae5ed2adb389e97405dfcbd3e33264b7673d2fd1845e60e735fe94"} Oct 11 04:15:18 crc kubenswrapper[4798]: I1011 04:15:18.797216 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" event={"ID":"ee0fdd8b-8eab-4bff-833a-92acacd45ea4","Type":"ContainerStarted","Data":"c7906bd0ee5b41ec4203572994cd65a0bc42b6509707ac846b65e956571cc827"} Oct 11 04:15:18 crc kubenswrapper[4798]: I1011 04:15:18.816813 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" podStartSLOduration=3.816796156 podStartE2EDuration="3.816796156s" podCreationTimestamp="2025-10-11 04:15:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:15:18.81362259 +0000 UTC m=+1214.149912296" watchObservedRunningTime="2025-10-11 04:15:18.816796156 +0000 UTC m=+1214.153085842" Oct 11 04:15:19 crc kubenswrapper[4798]: I1011 04:15:19.806718 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.255662 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.322499 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-k76dd"] Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.322782 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" podUID="fa13805f-6f19-41b8-a0a7-5a18e9e1c908" 
containerName="dnsmasq-dns" containerID="cri-o://1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e" gracePeriod=10 Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.481255 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-l6zlt"] Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.491157 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.506910 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-l6zlt"] Oct 11 04:15:26 crc kubenswrapper[4798]: E1011 04:15:26.536110 4798 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa13805f_6f19_41b8_a0a7_5a18e9e1c908.slice/crio-conmon-1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e.scope\": RecentStats: unable to find data in memory cache]" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.613717 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-ovsdbserver-nb\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.613766 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-config\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.613808 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-openstack-edpm-ipam\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.613827 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7v5tv\" (UniqueName: \"kubernetes.io/projected/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-kube-api-access-7v5tv\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.613867 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-dns-svc\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.613884 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-ovsdbserver-sb\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.716436 
4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-ovsdbserver-nb\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.717030 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-config\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.717083 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-openstack-edpm-ipam\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.717103 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7v5tv\" (UniqueName: \"kubernetes.io/projected/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-kube-api-access-7v5tv\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.717155 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-dns-svc\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.717174 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-ovsdbserver-sb\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.717663 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-ovsdbserver-nb\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.719113 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-ovsdbserver-sb\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.720328 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-config\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.720899 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-openstack-edpm-ipam\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.721252 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-dns-svc\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.743992 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7v5tv\" (UniqueName: \"kubernetes.io/projected/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-kube-api-access-7v5tv\") pod \"dnsmasq-dns-fbc59fbb7-l6zlt\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.840315 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.840638 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.893840 4798 generic.go:334] "Generic (PLEG): container finished" podID="fa13805f-6f19-41b8-a0a7-5a18e9e1c908" containerID="1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e" exitCode=0 Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.893882 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" event={"ID":"fa13805f-6f19-41b8-a0a7-5a18e9e1c908","Type":"ContainerDied","Data":"1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e"} Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.893910 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" event={"ID":"fa13805f-6f19-41b8-a0a7-5a18e9e1c908","Type":"ContainerDied","Data":"ccaa8d1d17851c00fd09e9b592430bf1fd26a57b4170d0200d46951a61c9299e"} Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.893929 4798 scope.go:117] "RemoveContainer" containerID="1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.894062 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.920854 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhsmv\" (UniqueName: \"kubernetes.io/projected/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-kube-api-access-jhsmv\") pod \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.920941 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-dns-svc\") pod \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.921034 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-config\") pod \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.921202 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-ovsdbserver-nb\") pod \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.921315 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-ovsdbserver-sb\") pod \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\" (UID: \"fa13805f-6f19-41b8-a0a7-5a18e9e1c908\") " Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.930738 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-kube-api-access-jhsmv" (OuterVolumeSpecName: "kube-api-access-jhsmv") pod "fa13805f-6f19-41b8-a0a7-5a18e9e1c908" (UID: "fa13805f-6f19-41b8-a0a7-5a18e9e1c908"). InnerVolumeSpecName "kube-api-access-jhsmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:15:26 crc kubenswrapper[4798]: I1011 04:15:26.995677 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "fa13805f-6f19-41b8-a0a7-5a18e9e1c908" (UID: "fa13805f-6f19-41b8-a0a7-5a18e9e1c908"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.000780 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "fa13805f-6f19-41b8-a0a7-5a18e9e1c908" (UID: "fa13805f-6f19-41b8-a0a7-5a18e9e1c908"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.016204 4798 scope.go:117] "RemoveContainer" containerID="549bafb0ea114b72205d890e7d87a7659ad5fa639404a8020adb7a19cd7b417b" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.026721 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.026752 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.026762 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhsmv\" (UniqueName: \"kubernetes.io/projected/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-kube-api-access-jhsmv\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.029843 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-config" (OuterVolumeSpecName: "config") pod "fa13805f-6f19-41b8-a0a7-5a18e9e1c908" (UID: "fa13805f-6f19-41b8-a0a7-5a18e9e1c908"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.031725 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "fa13805f-6f19-41b8-a0a7-5a18e9e1c908" (UID: "fa13805f-6f19-41b8-a0a7-5a18e9e1c908"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.062876 4798 scope.go:117] "RemoveContainer" containerID="1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e" Oct 11 04:15:27 crc kubenswrapper[4798]: E1011 04:15:27.063564 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e\": container with ID starting with 1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e not found: ID does not exist" containerID="1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.063604 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e"} err="failed to get container status \"1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e\": rpc error: code = NotFound desc = could not find container \"1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e\": container with ID starting with 1e15afb6381e5aa41d957825a14940bb8501f404457bcae1758e35163b47ca3e not found: ID does not exist" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.063631 4798 scope.go:117] "RemoveContainer" containerID="549bafb0ea114b72205d890e7d87a7659ad5fa639404a8020adb7a19cd7b417b" Oct 11 04:15:27 crc kubenswrapper[4798]: E1011 04:15:27.064058 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"549bafb0ea114b72205d890e7d87a7659ad5fa639404a8020adb7a19cd7b417b\": container with ID starting with 549bafb0ea114b72205d890e7d87a7659ad5fa639404a8020adb7a19cd7b417b not found: ID does not exist" containerID="549bafb0ea114b72205d890e7d87a7659ad5fa639404a8020adb7a19cd7b417b" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.064090 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"549bafb0ea114b72205d890e7d87a7659ad5fa639404a8020adb7a19cd7b417b"} err="failed to get container status \"549bafb0ea114b72205d890e7d87a7659ad5fa639404a8020adb7a19cd7b417b\": rpc error: code = NotFound desc = could not find container \"549bafb0ea114b72205d890e7d87a7659ad5fa639404a8020adb7a19cd7b417b\": container with ID starting with 549bafb0ea114b72205d890e7d87a7659ad5fa639404a8020adb7a19cd7b417b not found: ID does not exist" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.128897 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.128943 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa13805f-6f19-41b8-a0a7-5a18e9e1c908-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.232196 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-k76dd"] Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.241006 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-k76dd"] Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.305361 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-l6zlt"] Oct 11 04:15:27 crc 
kubenswrapper[4798]: W1011 04:15:27.308808 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ecaa1d8_972c_4d57_84ef_9c86b16c3c36.slice/crio-f45384c58b1f03d8dea6374916101254a0dd55ef5146c561ae7d4e82af2cadfe WatchSource:0}: Error finding container f45384c58b1f03d8dea6374916101254a0dd55ef5146c561ae7d4e82af2cadfe: Status 404 returned error can't find the container with id f45384c58b1f03d8dea6374916101254a0dd55ef5146c561ae7d4e82af2cadfe Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.439788 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa13805f-6f19-41b8-a0a7-5a18e9e1c908" path="/var/lib/kubelet/pods/fa13805f-6f19-41b8-a0a7-5a18e9e1c908/volumes" Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.904970 4798 generic.go:334] "Generic (PLEG): container finished" podID="1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" containerID="a08941f812228abb5cc33df5c8d3ca37b21ed16b84dce06ca863b44ed8b67cab" exitCode=0 Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.905079 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" event={"ID":"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36","Type":"ContainerDied","Data":"a08941f812228abb5cc33df5c8d3ca37b21ed16b84dce06ca863b44ed8b67cab"} Oct 11 04:15:27 crc kubenswrapper[4798]: I1011 04:15:27.905144 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" event={"ID":"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36","Type":"ContainerStarted","Data":"f45384c58b1f03d8dea6374916101254a0dd55ef5146c561ae7d4e82af2cadfe"} Oct 11 04:15:28 crc kubenswrapper[4798]: I1011 04:15:28.917561 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" event={"ID":"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36","Type":"ContainerStarted","Data":"5897186685fe77a987b8b5d0a9355871f553c4c9ea21f2cb11d15867e1bae8a2"} Oct 11 04:15:28 crc kubenswrapper[4798]: I1011 04:15:28.917707 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:28 crc kubenswrapper[4798]: I1011 04:15:28.949834 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" podStartSLOduration=2.9498010900000002 podStartE2EDuration="2.94980109s" podCreationTimestamp="2025-10-11 04:15:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:15:28.938977952 +0000 UTC m=+1224.275267678" watchObservedRunningTime="2025-10-11 04:15:28.94980109 +0000 UTC m=+1224.286090806" Oct 11 04:15:31 crc kubenswrapper[4798]: I1011 04:15:31.597040 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-68d4b6d797-k76dd" podUID="fa13805f-6f19-41b8-a0a7-5a18e9e1c908" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.181:5353: i/o timeout" Oct 11 04:15:36 crc kubenswrapper[4798]: I1011 04:15:36.842690 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:15:36 crc kubenswrapper[4798]: I1011 04:15:36.900502 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-s6rjn"] Oct 11 04:15:36 crc kubenswrapper[4798]: I1011 04:15:36.901113 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" 
podUID="ee0fdd8b-8eab-4bff-833a-92acacd45ea4" containerName="dnsmasq-dns" containerID="cri-o://c7906bd0ee5b41ec4203572994cd65a0bc42b6509707ac846b65e956571cc827" gracePeriod=10 Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.422447 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.454624 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-config\") pod \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.454835 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kjf89\" (UniqueName: \"kubernetes.io/projected/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-kube-api-access-kjf89\") pod \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.454877 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-openstack-edpm-ipam\") pod \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.455095 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-dns-svc\") pod \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.455140 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-ovsdbserver-sb\") pod \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.455169 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-ovsdbserver-nb\") pod \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\" (UID: \"ee0fdd8b-8eab-4bff-833a-92acacd45ea4\") " Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.496761 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-kube-api-access-kjf89" (OuterVolumeSpecName: "kube-api-access-kjf89") pod "ee0fdd8b-8eab-4bff-833a-92acacd45ea4" (UID: "ee0fdd8b-8eab-4bff-833a-92acacd45ea4"). InnerVolumeSpecName "kube-api-access-kjf89". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.520518 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ee0fdd8b-8eab-4bff-833a-92acacd45ea4" (UID: "ee0fdd8b-8eab-4bff-833a-92acacd45ea4"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.536816 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "ee0fdd8b-8eab-4bff-833a-92acacd45ea4" (UID: "ee0fdd8b-8eab-4bff-833a-92acacd45ea4"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.542748 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ee0fdd8b-8eab-4bff-833a-92acacd45ea4" (UID: "ee0fdd8b-8eab-4bff-833a-92acacd45ea4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.549268 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-config" (OuterVolumeSpecName: "config") pod "ee0fdd8b-8eab-4bff-833a-92acacd45ea4" (UID: "ee0fdd8b-8eab-4bff-833a-92acacd45ea4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.560478 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kjf89\" (UniqueName: \"kubernetes.io/projected/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-kube-api-access-kjf89\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.560526 4798 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.560540 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.560552 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.560565 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.568268 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ee0fdd8b-8eab-4bff-833a-92acacd45ea4" (UID: "ee0fdd8b-8eab-4bff-833a-92acacd45ea4"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:15:37 crc kubenswrapper[4798]: I1011 04:15:37.662947 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee0fdd8b-8eab-4bff-833a-92acacd45ea4-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 04:15:38 crc kubenswrapper[4798]: I1011 04:15:38.016984 4798 generic.go:334] "Generic (PLEG): container finished" podID="ee0fdd8b-8eab-4bff-833a-92acacd45ea4" containerID="c7906bd0ee5b41ec4203572994cd65a0bc42b6509707ac846b65e956571cc827" exitCode=0 Oct 11 04:15:38 crc kubenswrapper[4798]: I1011 04:15:38.017030 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" event={"ID":"ee0fdd8b-8eab-4bff-833a-92acacd45ea4","Type":"ContainerDied","Data":"c7906bd0ee5b41ec4203572994cd65a0bc42b6509707ac846b65e956571cc827"} Oct 11 04:15:38 crc kubenswrapper[4798]: I1011 04:15:38.017058 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" event={"ID":"ee0fdd8b-8eab-4bff-833a-92acacd45ea4","Type":"ContainerDied","Data":"2cf5def8440ced1d3fd62f54f49c9c3218c334a485b347a43efedec356d51975"} Oct 11 04:15:38 crc kubenswrapper[4798]: I1011 04:15:38.017077 4798 scope.go:117] "RemoveContainer" containerID="c7906bd0ee5b41ec4203572994cd65a0bc42b6509707ac846b65e956571cc827" Oct 11 04:15:38 crc kubenswrapper[4798]: I1011 04:15:38.017078 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-578b8d767c-s6rjn" Oct 11 04:15:38 crc kubenswrapper[4798]: I1011 04:15:38.070590 4798 scope.go:117] "RemoveContainer" containerID="1ffb1fafab0d80c8351fc3eeb3cafa17815e58378dd7b7b5813ab1e85b741730" Oct 11 04:15:38 crc kubenswrapper[4798]: I1011 04:15:38.077721 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-s6rjn"] Oct 11 04:15:38 crc kubenswrapper[4798]: I1011 04:15:38.085367 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-s6rjn"] Oct 11 04:15:38 crc kubenswrapper[4798]: I1011 04:15:38.105514 4798 scope.go:117] "RemoveContainer" containerID="c7906bd0ee5b41ec4203572994cd65a0bc42b6509707ac846b65e956571cc827" Oct 11 04:15:38 crc kubenswrapper[4798]: E1011 04:15:38.106508 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7906bd0ee5b41ec4203572994cd65a0bc42b6509707ac846b65e956571cc827\": container with ID starting with c7906bd0ee5b41ec4203572994cd65a0bc42b6509707ac846b65e956571cc827 not found: ID does not exist" containerID="c7906bd0ee5b41ec4203572994cd65a0bc42b6509707ac846b65e956571cc827" Oct 11 04:15:38 crc kubenswrapper[4798]: I1011 04:15:38.106597 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7906bd0ee5b41ec4203572994cd65a0bc42b6509707ac846b65e956571cc827"} err="failed to get container status \"c7906bd0ee5b41ec4203572994cd65a0bc42b6509707ac846b65e956571cc827\": rpc error: code = NotFound desc = could not find container \"c7906bd0ee5b41ec4203572994cd65a0bc42b6509707ac846b65e956571cc827\": container with ID starting with c7906bd0ee5b41ec4203572994cd65a0bc42b6509707ac846b65e956571cc827 not found: ID does not exist" Oct 11 04:15:38 crc kubenswrapper[4798]: I1011 04:15:38.106645 4798 scope.go:117] "RemoveContainer" containerID="1ffb1fafab0d80c8351fc3eeb3cafa17815e58378dd7b7b5813ab1e85b741730" Oct 11 04:15:38 crc kubenswrapper[4798]: E1011 04:15:38.107055 4798 
log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ffb1fafab0d80c8351fc3eeb3cafa17815e58378dd7b7b5813ab1e85b741730\": container with ID starting with 1ffb1fafab0d80c8351fc3eeb3cafa17815e58378dd7b7b5813ab1e85b741730 not found: ID does not exist" containerID="1ffb1fafab0d80c8351fc3eeb3cafa17815e58378dd7b7b5813ab1e85b741730" Oct 11 04:15:38 crc kubenswrapper[4798]: I1011 04:15:38.107093 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ffb1fafab0d80c8351fc3eeb3cafa17815e58378dd7b7b5813ab1e85b741730"} err="failed to get container status \"1ffb1fafab0d80c8351fc3eeb3cafa17815e58378dd7b7b5813ab1e85b741730\": rpc error: code = NotFound desc = could not find container \"1ffb1fafab0d80c8351fc3eeb3cafa17815e58378dd7b7b5813ab1e85b741730\": container with ID starting with 1ffb1fafab0d80c8351fc3eeb3cafa17815e58378dd7b7b5813ab1e85b741730 not found: ID does not exist" Oct 11 04:15:39 crc kubenswrapper[4798]: I1011 04:15:39.436677 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee0fdd8b-8eab-4bff-833a-92acacd45ea4" path="/var/lib/kubelet/pods/ee0fdd8b-8eab-4bff-833a-92acacd45ea4/volumes" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.755083 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486"] Oct 11 04:15:42 crc kubenswrapper[4798]: E1011 04:15:42.756368 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa13805f-6f19-41b8-a0a7-5a18e9e1c908" containerName="init" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.756387 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa13805f-6f19-41b8-a0a7-5a18e9e1c908" containerName="init" Oct 11 04:15:42 crc kubenswrapper[4798]: E1011 04:15:42.756429 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee0fdd8b-8eab-4bff-833a-92acacd45ea4" containerName="dnsmasq-dns" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.756436 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee0fdd8b-8eab-4bff-833a-92acacd45ea4" containerName="dnsmasq-dns" Oct 11 04:15:42 crc kubenswrapper[4798]: E1011 04:15:42.756462 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee0fdd8b-8eab-4bff-833a-92acacd45ea4" containerName="init" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.756470 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee0fdd8b-8eab-4bff-833a-92acacd45ea4" containerName="init" Oct 11 04:15:42 crc kubenswrapper[4798]: E1011 04:15:42.756494 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa13805f-6f19-41b8-a0a7-5a18e9e1c908" containerName="dnsmasq-dns" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.756501 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa13805f-6f19-41b8-a0a7-5a18e9e1c908" containerName="dnsmasq-dns" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.757043 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa13805f-6f19-41b8-a0a7-5a18e9e1c908" containerName="dnsmasq-dns" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.757072 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee0fdd8b-8eab-4bff-833a-92acacd45ea4" containerName="dnsmasq-dns" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.757908 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.764124 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.764129 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.764982 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.765031 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.794577 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486"] Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.867037 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dq486\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.867496 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dq486\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.867533 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dq486\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.867597 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmnwc\" (UniqueName: \"kubernetes.io/projected/a740bd42-4c8c-42a1-8b39-50e2f9389b40-kube-api-access-nmnwc\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dq486\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.969516 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dq486\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.970111 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-ssh-key\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-dq486\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.970427 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmnwc\" (UniqueName: \"kubernetes.io/projected/a740bd42-4c8c-42a1-8b39-50e2f9389b40-kube-api-access-nmnwc\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dq486\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.970643 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dq486\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.976519 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dq486\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.976542 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dq486\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.984063 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dq486\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:42 crc kubenswrapper[4798]: I1011 04:15:42.988457 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmnwc\" (UniqueName: \"kubernetes.io/projected/a740bd42-4c8c-42a1-8b39-50e2f9389b40-kube-api-access-nmnwc\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dq486\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:43 crc kubenswrapper[4798]: I1011 04:15:43.093725 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:15:43 crc kubenswrapper[4798]: I1011 04:15:43.731883 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486"] Oct 11 04:15:43 crc kubenswrapper[4798]: I1011 04:15:43.748573 4798 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 04:15:44 crc kubenswrapper[4798]: I1011 04:15:44.072664 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" event={"ID":"a740bd42-4c8c-42a1-8b39-50e2f9389b40","Type":"ContainerStarted","Data":"cf9167c1e719589517eb8f4f55d21ef610f3ae185ccc48dc5d4e95e4b85b6a72"} Oct 11 04:15:48 crc kubenswrapper[4798]: I1011 04:15:48.115101 4798 generic.go:334] "Generic (PLEG): container finished" podID="c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe" containerID="0d778cd278c428f53b7d93f074cbd3dd114fc9c23a125898cf315c89f44f3efa" exitCode=0 Oct 11 04:15:48 crc kubenswrapper[4798]: I1011 04:15:48.115197 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe","Type":"ContainerDied","Data":"0d778cd278c428f53b7d93f074cbd3dd114fc9c23a125898cf315c89f44f3efa"} Oct 11 04:15:50 crc kubenswrapper[4798]: I1011 04:15:50.144376 4798 generic.go:334] "Generic (PLEG): container finished" podID="cdfee9bc-39ae-48a4-9645-7191a2ae2cd5" containerID="2e7b928dd4ae5ed2adb389e97405dfcbd3e33264b7673d2fd1845e60e735fe94" exitCode=0 Oct 11 04:15:50 crc kubenswrapper[4798]: I1011 04:15:50.144434 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5","Type":"ContainerDied","Data":"2e7b928dd4ae5ed2adb389e97405dfcbd3e33264b7673d2fd1845e60e735fe94"} Oct 11 04:15:53 crc kubenswrapper[4798]: I1011 04:15:53.193863 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe","Type":"ContainerStarted","Data":"90c71557a6326a94b489a6004c7fd11e8c34bd402282d728c9997f4b6027e7d3"} Oct 11 04:15:53 crc kubenswrapper[4798]: I1011 04:15:53.194933 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Oct 11 04:15:53 crc kubenswrapper[4798]: I1011 04:15:53.196025 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"cdfee9bc-39ae-48a4-9645-7191a2ae2cd5","Type":"ContainerStarted","Data":"b9f9652390d8b4e87a74af9efe0245c86ac1722b9949b5ebb24228802f75ba61"} Oct 11 04:15:53 crc kubenswrapper[4798]: I1011 04:15:53.196709 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:15:53 crc kubenswrapper[4798]: I1011 04:15:53.198939 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" event={"ID":"a740bd42-4c8c-42a1-8b39-50e2f9389b40","Type":"ContainerStarted","Data":"961bbec559ae3d181a04931cf29dfa73c41debf56553105153d2a5f2347b8b7b"} Oct 11 04:15:53 crc kubenswrapper[4798]: I1011 04:15:53.226803 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=40.226771336 podStartE2EDuration="40.226771336s" podCreationTimestamp="2025-10-11 04:15:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:15:53.217990655 +0000 UTC m=+1248.554280341" watchObservedRunningTime="2025-10-11 04:15:53.226771336 +0000 UTC m=+1248.563061022" Oct 11 04:15:53 crc kubenswrapper[4798]: I1011 04:15:53.268055 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" podStartSLOduration=3.078597048 podStartE2EDuration="11.268033414s" podCreationTimestamp="2025-10-11 04:15:42 +0000 UTC" firstStartedPulling="2025-10-11 04:15:43.748207782 +0000 UTC m=+1239.084497468" lastFinishedPulling="2025-10-11 04:15:51.937644158 +0000 UTC m=+1247.273933834" observedRunningTime="2025-10-11 04:15:53.242406851 +0000 UTC m=+1248.578696537" watchObservedRunningTime="2025-10-11 04:15:53.268033414 +0000 UTC m=+1248.604323100" Oct 11 04:15:53 crc kubenswrapper[4798]: I1011 04:15:53.272262 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=39.272242485 podStartE2EDuration="39.272242485s" podCreationTimestamp="2025-10-11 04:15:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:15:53.263594648 +0000 UTC m=+1248.599884334" watchObservedRunningTime="2025-10-11 04:15:53.272242485 +0000 UTC m=+1248.608532171" Oct 11 04:15:57 crc kubenswrapper[4798]: I1011 04:15:57.138059 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:15:57 crc kubenswrapper[4798]: I1011 04:15:57.138846 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:16:03 crc kubenswrapper[4798]: I1011 04:16:03.438713 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Oct 11 04:16:04 crc kubenswrapper[4798]: I1011 04:16:04.305993 4798 generic.go:334] "Generic (PLEG): container finished" podID="a740bd42-4c8c-42a1-8b39-50e2f9389b40" containerID="961bbec559ae3d181a04931cf29dfa73c41debf56553105153d2a5f2347b8b7b" exitCode=0 Oct 11 04:16:04 crc kubenswrapper[4798]: I1011 04:16:04.306085 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" event={"ID":"a740bd42-4c8c-42a1-8b39-50e2f9389b40","Type":"ContainerDied","Data":"961bbec559ae3d181a04931cf29dfa73c41debf56553105153d2a5f2347b8b7b"} Oct 11 04:16:04 crc kubenswrapper[4798]: I1011 04:16:04.406600 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Oct 11 04:16:05 crc kubenswrapper[4798]: I1011 04:16:05.817678 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:16:05 crc kubenswrapper[4798]: I1011 04:16:05.945437 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmnwc\" (UniqueName: \"kubernetes.io/projected/a740bd42-4c8c-42a1-8b39-50e2f9389b40-kube-api-access-nmnwc\") pod \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " Oct 11 04:16:05 crc kubenswrapper[4798]: I1011 04:16:05.945643 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-repo-setup-combined-ca-bundle\") pod \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " Oct 11 04:16:05 crc kubenswrapper[4798]: I1011 04:16:05.945674 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-ssh-key\") pod \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " Oct 11 04:16:05 crc kubenswrapper[4798]: I1011 04:16:05.945796 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-inventory\") pod \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\" (UID: \"a740bd42-4c8c-42a1-8b39-50e2f9389b40\") " Oct 11 04:16:05 crc kubenswrapper[4798]: I1011 04:16:05.951383 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "a740bd42-4c8c-42a1-8b39-50e2f9389b40" (UID: "a740bd42-4c8c-42a1-8b39-50e2f9389b40"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:16:05 crc kubenswrapper[4798]: I1011 04:16:05.951587 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a740bd42-4c8c-42a1-8b39-50e2f9389b40-kube-api-access-nmnwc" (OuterVolumeSpecName: "kube-api-access-nmnwc") pod "a740bd42-4c8c-42a1-8b39-50e2f9389b40" (UID: "a740bd42-4c8c-42a1-8b39-50e2f9389b40"). InnerVolumeSpecName "kube-api-access-nmnwc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:16:05 crc kubenswrapper[4798]: I1011 04:16:05.971958 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a740bd42-4c8c-42a1-8b39-50e2f9389b40" (UID: "a740bd42-4c8c-42a1-8b39-50e2f9389b40"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:16:05 crc kubenswrapper[4798]: I1011 04:16:05.974553 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-inventory" (OuterVolumeSpecName: "inventory") pod "a740bd42-4c8c-42a1-8b39-50e2f9389b40" (UID: "a740bd42-4c8c-42a1-8b39-50e2f9389b40"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.047885 4798 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.047918 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.047932 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a740bd42-4c8c-42a1-8b39-50e2f9389b40-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.047942 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmnwc\" (UniqueName: \"kubernetes.io/projected/a740bd42-4c8c-42a1-8b39-50e2f9389b40-kube-api-access-nmnwc\") on node \"crc\" DevicePath \"\"" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.331073 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" event={"ID":"a740bd42-4c8c-42a1-8b39-50e2f9389b40","Type":"ContainerDied","Data":"cf9167c1e719589517eb8f4f55d21ef610f3ae185ccc48dc5d4e95e4b85b6a72"} Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.331118 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf9167c1e719589517eb8f4f55d21ef610f3ae185ccc48dc5d4e95e4b85b6a72" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.331631 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.415720 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4"] Oct 11 04:16:06 crc kubenswrapper[4798]: E1011 04:16:06.416160 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a740bd42-4c8c-42a1-8b39-50e2f9389b40" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.416189 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="a740bd42-4c8c-42a1-8b39-50e2f9389b40" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.416433 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="a740bd42-4c8c-42a1-8b39-50e2f9389b40" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.417055 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.419321 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.419552 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.419690 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.419810 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.426734 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4"] Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.454836 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnzhl\" (UniqueName: \"kubernetes.io/projected/326c1bc8-1869-4ca2-8e8b-10356a3ca498-kube-api-access-wnzhl\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.455195 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.455230 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.455269 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.556665 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnzhl\" (UniqueName: \"kubernetes.io/projected/326c1bc8-1869-4ca2-8e8b-10356a3ca498-kube-api-access-wnzhl\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.556732 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-bootstrap-combined-ca-bundle\") 
pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.556765 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.556803 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.561966 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.562913 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.568834 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.574901 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnzhl\" (UniqueName: \"kubernetes.io/projected/326c1bc8-1869-4ca2-8e8b-10356a3ca498-kube-api-access-wnzhl\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:06 crc kubenswrapper[4798]: I1011 04:16:06.745532 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:16:07 crc kubenswrapper[4798]: W1011 04:16:07.296834 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod326c1bc8_1869_4ca2_8e8b_10356a3ca498.slice/crio-ba4207c4aa08e0320a3368e5e633b69e8f57e33fe4589e71bbfd1013c6088f23 WatchSource:0}: Error finding container ba4207c4aa08e0320a3368e5e633b69e8f57e33fe4589e71bbfd1013c6088f23: Status 404 returned error can't find the container with id ba4207c4aa08e0320a3368e5e633b69e8f57e33fe4589e71bbfd1013c6088f23 Oct 11 04:16:07 crc kubenswrapper[4798]: I1011 04:16:07.310036 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4"] Oct 11 04:16:07 crc kubenswrapper[4798]: I1011 04:16:07.343883 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" event={"ID":"326c1bc8-1869-4ca2-8e8b-10356a3ca498","Type":"ContainerStarted","Data":"ba4207c4aa08e0320a3368e5e633b69e8f57e33fe4589e71bbfd1013c6088f23"} Oct 11 04:16:08 crc kubenswrapper[4798]: I1011 04:16:08.360236 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" event={"ID":"326c1bc8-1869-4ca2-8e8b-10356a3ca498","Type":"ContainerStarted","Data":"40d1cb237ac5be84bd9bbc89bc64afbb7c8b554afa9505468407a9a2f73465d2"} Oct 11 04:16:08 crc kubenswrapper[4798]: I1011 04:16:08.384688 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" podStartSLOduration=1.958176806 podStartE2EDuration="2.384670232s" podCreationTimestamp="2025-10-11 04:16:06 +0000 UTC" firstStartedPulling="2025-10-11 04:16:07.305702357 +0000 UTC m=+1262.641992063" lastFinishedPulling="2025-10-11 04:16:07.732195803 +0000 UTC m=+1263.068485489" observedRunningTime="2025-10-11 04:16:08.383456822 +0000 UTC m=+1263.719746518" watchObservedRunningTime="2025-10-11 04:16:08.384670232 +0000 UTC m=+1263.720959918" Oct 11 04:16:27 crc kubenswrapper[4798]: I1011 04:16:27.138526 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:16:27 crc kubenswrapper[4798]: I1011 04:16:27.139424 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:16:57 crc kubenswrapper[4798]: I1011 04:16:57.138813 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:16:57 crc kubenswrapper[4798]: I1011 04:16:57.139953 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:16:57 crc kubenswrapper[4798]: I1011 04:16:57.140056 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 04:16:57 crc kubenswrapper[4798]: I1011 04:16:57.141759 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0ede2098e77cd102846ac8838309a8f0eccb5d93df251b4af8b848dec6f5e092"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 04:16:57 crc kubenswrapper[4798]: I1011 04:16:57.141905 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://0ede2098e77cd102846ac8838309a8f0eccb5d93df251b4af8b848dec6f5e092" gracePeriod=600 Oct 11 04:16:57 crc kubenswrapper[4798]: I1011 04:16:57.882674 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerID="0ede2098e77cd102846ac8838309a8f0eccb5d93df251b4af8b848dec6f5e092" exitCode=0 Oct 11 04:16:57 crc kubenswrapper[4798]: I1011 04:16:57.882793 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"0ede2098e77cd102846ac8838309a8f0eccb5d93df251b4af8b848dec6f5e092"} Oct 11 04:16:57 crc kubenswrapper[4798]: I1011 04:16:57.883511 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae"} Oct 11 04:16:57 crc kubenswrapper[4798]: I1011 04:16:57.883543 4798 scope.go:117] "RemoveContainer" containerID="d3a7cc4feeb865c985e6b8c7245a1d16577ae082c25541425e42b8ef5e8c15f6" Oct 11 04:17:18 crc kubenswrapper[4798]: I1011 04:17:18.821370 4798 scope.go:117] "RemoveContainer" containerID="eddd09c98c7ff46535ab323792d16179eeb6c27c1458ecb69baf1e866d44660e" Oct 11 04:17:18 crc kubenswrapper[4798]: I1011 04:17:18.849701 4798 scope.go:117] "RemoveContainer" containerID="9ae9a24e22a6942b2037276acde54483ffc2f9a5887c52cd49362217cdd6f6ef" Oct 11 04:18:18 crc kubenswrapper[4798]: I1011 04:18:18.948238 4798 scope.go:117] "RemoveContainer" containerID="4a255db8dd7d1dc2df8990b631f417baf82fd680863e71a683d0e88e8a4b6e89" Oct 11 04:18:26 crc kubenswrapper[4798]: I1011 04:18:26.314902 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c6mm8"] Oct 11 04:18:26 crc kubenswrapper[4798]: I1011 04:18:26.323103 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c6mm8"] Oct 11 04:18:26 crc kubenswrapper[4798]: I1011 04:18:26.323648 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:26 crc kubenswrapper[4798]: I1011 04:18:26.442590 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c659e5f-e03e-4a66-9635-6378dd761663-catalog-content\") pod \"certified-operators-c6mm8\" (UID: \"2c659e5f-e03e-4a66-9635-6378dd761663\") " pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:26 crc kubenswrapper[4798]: I1011 04:18:26.442712 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c659e5f-e03e-4a66-9635-6378dd761663-utilities\") pod \"certified-operators-c6mm8\" (UID: \"2c659e5f-e03e-4a66-9635-6378dd761663\") " pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:26 crc kubenswrapper[4798]: I1011 04:18:26.442940 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-splw6\" (UniqueName: \"kubernetes.io/projected/2c659e5f-e03e-4a66-9635-6378dd761663-kube-api-access-splw6\") pod \"certified-operators-c6mm8\" (UID: \"2c659e5f-e03e-4a66-9635-6378dd761663\") " pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:26 crc kubenswrapper[4798]: I1011 04:18:26.545167 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c659e5f-e03e-4a66-9635-6378dd761663-catalog-content\") pod \"certified-operators-c6mm8\" (UID: \"2c659e5f-e03e-4a66-9635-6378dd761663\") " pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:26 crc kubenswrapper[4798]: I1011 04:18:26.545749 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c659e5f-e03e-4a66-9635-6378dd761663-utilities\") pod \"certified-operators-c6mm8\" (UID: \"2c659e5f-e03e-4a66-9635-6378dd761663\") " pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:26 crc kubenswrapper[4798]: I1011 04:18:26.545809 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c659e5f-e03e-4a66-9635-6378dd761663-catalog-content\") pod \"certified-operators-c6mm8\" (UID: \"2c659e5f-e03e-4a66-9635-6378dd761663\") " pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:26 crc kubenswrapper[4798]: I1011 04:18:26.545840 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-splw6\" (UniqueName: \"kubernetes.io/projected/2c659e5f-e03e-4a66-9635-6378dd761663-kube-api-access-splw6\") pod \"certified-operators-c6mm8\" (UID: \"2c659e5f-e03e-4a66-9635-6378dd761663\") " pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:26 crc kubenswrapper[4798]: I1011 04:18:26.546062 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c659e5f-e03e-4a66-9635-6378dd761663-utilities\") pod \"certified-operators-c6mm8\" (UID: \"2c659e5f-e03e-4a66-9635-6378dd761663\") " pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:26 crc kubenswrapper[4798]: I1011 04:18:26.568827 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-splw6\" (UniqueName: \"kubernetes.io/projected/2c659e5f-e03e-4a66-9635-6378dd761663-kube-api-access-splw6\") pod 
\"certified-operators-c6mm8\" (UID: \"2c659e5f-e03e-4a66-9635-6378dd761663\") " pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:26 crc kubenswrapper[4798]: I1011 04:18:26.668212 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:27 crc kubenswrapper[4798]: I1011 04:18:27.187728 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c6mm8"] Oct 11 04:18:27 crc kubenswrapper[4798]: W1011 04:18:27.192259 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c659e5f_e03e_4a66_9635_6378dd761663.slice/crio-5a26f5ed78ab5e33b73b58c5664a135e690d04d77e2938bd118505d7a76589fb WatchSource:0}: Error finding container 5a26f5ed78ab5e33b73b58c5664a135e690d04d77e2938bd118505d7a76589fb: Status 404 returned error can't find the container with id 5a26f5ed78ab5e33b73b58c5664a135e690d04d77e2938bd118505d7a76589fb Oct 11 04:18:27 crc kubenswrapper[4798]: I1011 04:18:27.842462 4798 generic.go:334] "Generic (PLEG): container finished" podID="2c659e5f-e03e-4a66-9635-6378dd761663" containerID="d4c7a400cb9fedfee66d81f020c2d954e1f6597875e13135d0c85b8c51c7ed68" exitCode=0 Oct 11 04:18:27 crc kubenswrapper[4798]: I1011 04:18:27.842505 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c6mm8" event={"ID":"2c659e5f-e03e-4a66-9635-6378dd761663","Type":"ContainerDied","Data":"d4c7a400cb9fedfee66d81f020c2d954e1f6597875e13135d0c85b8c51c7ed68"} Oct 11 04:18:27 crc kubenswrapper[4798]: I1011 04:18:27.842878 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c6mm8" event={"ID":"2c659e5f-e03e-4a66-9635-6378dd761663","Type":"ContainerStarted","Data":"5a26f5ed78ab5e33b73b58c5664a135e690d04d77e2938bd118505d7a76589fb"} Oct 11 04:18:28 crc kubenswrapper[4798]: I1011 04:18:28.854415 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c6mm8" event={"ID":"2c659e5f-e03e-4a66-9635-6378dd761663","Type":"ContainerStarted","Data":"3ab49639d5a71d102da366eef003e3d576da11b7b46c6522f586e4b0d7514464"} Oct 11 04:18:29 crc kubenswrapper[4798]: I1011 04:18:29.867615 4798 generic.go:334] "Generic (PLEG): container finished" podID="2c659e5f-e03e-4a66-9635-6378dd761663" containerID="3ab49639d5a71d102da366eef003e3d576da11b7b46c6522f586e4b0d7514464" exitCode=0 Oct 11 04:18:29 crc kubenswrapper[4798]: I1011 04:18:29.867693 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c6mm8" event={"ID":"2c659e5f-e03e-4a66-9635-6378dd761663","Type":"ContainerDied","Data":"3ab49639d5a71d102da366eef003e3d576da11b7b46c6522f586e4b0d7514464"} Oct 11 04:18:30 crc kubenswrapper[4798]: I1011 04:18:30.880835 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c6mm8" event={"ID":"2c659e5f-e03e-4a66-9635-6378dd761663","Type":"ContainerStarted","Data":"a8c7aba19aa73b30b05afb742d871084b02a12f007b40178384baee39dd1c689"} Oct 11 04:18:30 crc kubenswrapper[4798]: I1011 04:18:30.904726 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c6mm8" podStartSLOduration=2.452000231 podStartE2EDuration="4.904705814s" podCreationTimestamp="2025-10-11 04:18:26 +0000 UTC" firstStartedPulling="2025-10-11 04:18:27.845253512 +0000 UTC m=+1403.181543198" 
lastFinishedPulling="2025-10-11 04:18:30.297959095 +0000 UTC m=+1405.634248781" observedRunningTime="2025-10-11 04:18:30.899631643 +0000 UTC m=+1406.235921329" watchObservedRunningTime="2025-10-11 04:18:30.904705814 +0000 UTC m=+1406.240995500" Oct 11 04:18:36 crc kubenswrapper[4798]: I1011 04:18:36.668092 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:36 crc kubenswrapper[4798]: I1011 04:18:36.669930 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:36 crc kubenswrapper[4798]: I1011 04:18:36.734680 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:36 crc kubenswrapper[4798]: I1011 04:18:36.994518 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:37 crc kubenswrapper[4798]: I1011 04:18:37.048776 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c6mm8"] Oct 11 04:18:38 crc kubenswrapper[4798]: I1011 04:18:38.957508 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c6mm8" podUID="2c659e5f-e03e-4a66-9635-6378dd761663" containerName="registry-server" containerID="cri-o://a8c7aba19aa73b30b05afb742d871084b02a12f007b40178384baee39dd1c689" gracePeriod=2 Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.385315 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.405370 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-splw6\" (UniqueName: \"kubernetes.io/projected/2c659e5f-e03e-4a66-9635-6378dd761663-kube-api-access-splw6\") pod \"2c659e5f-e03e-4a66-9635-6378dd761663\" (UID: \"2c659e5f-e03e-4a66-9635-6378dd761663\") " Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.405729 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c659e5f-e03e-4a66-9635-6378dd761663-utilities\") pod \"2c659e5f-e03e-4a66-9635-6378dd761663\" (UID: \"2c659e5f-e03e-4a66-9635-6378dd761663\") " Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.405769 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c659e5f-e03e-4a66-9635-6378dd761663-catalog-content\") pod \"2c659e5f-e03e-4a66-9635-6378dd761663\" (UID: \"2c659e5f-e03e-4a66-9635-6378dd761663\") " Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.406458 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c659e5f-e03e-4a66-9635-6378dd761663-utilities" (OuterVolumeSpecName: "utilities") pod "2c659e5f-e03e-4a66-9635-6378dd761663" (UID: "2c659e5f-e03e-4a66-9635-6378dd761663"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.408357 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c659e5f-e03e-4a66-9635-6378dd761663-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.413002 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c659e5f-e03e-4a66-9635-6378dd761663-kube-api-access-splw6" (OuterVolumeSpecName: "kube-api-access-splw6") pod "2c659e5f-e03e-4a66-9635-6378dd761663" (UID: "2c659e5f-e03e-4a66-9635-6378dd761663"). InnerVolumeSpecName "kube-api-access-splw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.452545 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c659e5f-e03e-4a66-9635-6378dd761663-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2c659e5f-e03e-4a66-9635-6378dd761663" (UID: "2c659e5f-e03e-4a66-9635-6378dd761663"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.510815 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c659e5f-e03e-4a66-9635-6378dd761663-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.511152 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-splw6\" (UniqueName: \"kubernetes.io/projected/2c659e5f-e03e-4a66-9635-6378dd761663-kube-api-access-splw6\") on node \"crc\" DevicePath \"\"" Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.968534 4798 generic.go:334] "Generic (PLEG): container finished" podID="2c659e5f-e03e-4a66-9635-6378dd761663" containerID="a8c7aba19aa73b30b05afb742d871084b02a12f007b40178384baee39dd1c689" exitCode=0 Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.968604 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c6mm8" event={"ID":"2c659e5f-e03e-4a66-9635-6378dd761663","Type":"ContainerDied","Data":"a8c7aba19aa73b30b05afb742d871084b02a12f007b40178384baee39dd1c689"} Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.968677 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c6mm8" event={"ID":"2c659e5f-e03e-4a66-9635-6378dd761663","Type":"ContainerDied","Data":"5a26f5ed78ab5e33b73b58c5664a135e690d04d77e2938bd118505d7a76589fb"} Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.968700 4798 scope.go:117] "RemoveContainer" containerID="a8c7aba19aa73b30b05afb742d871084b02a12f007b40178384baee39dd1c689" Oct 11 04:18:39 crc kubenswrapper[4798]: I1011 04:18:39.968623 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c6mm8" Oct 11 04:18:40 crc kubenswrapper[4798]: I1011 04:18:40.003329 4798 scope.go:117] "RemoveContainer" containerID="3ab49639d5a71d102da366eef003e3d576da11b7b46c6522f586e4b0d7514464" Oct 11 04:18:40 crc kubenswrapper[4798]: I1011 04:18:40.006670 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c6mm8"] Oct 11 04:18:40 crc kubenswrapper[4798]: I1011 04:18:40.018206 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c6mm8"] Oct 11 04:18:40 crc kubenswrapper[4798]: I1011 04:18:40.026861 4798 scope.go:117] "RemoveContainer" containerID="d4c7a400cb9fedfee66d81f020c2d954e1f6597875e13135d0c85b8c51c7ed68" Oct 11 04:18:40 crc kubenswrapper[4798]: I1011 04:18:40.068284 4798 scope.go:117] "RemoveContainer" containerID="a8c7aba19aa73b30b05afb742d871084b02a12f007b40178384baee39dd1c689" Oct 11 04:18:40 crc kubenswrapper[4798]: E1011 04:18:40.068777 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8c7aba19aa73b30b05afb742d871084b02a12f007b40178384baee39dd1c689\": container with ID starting with a8c7aba19aa73b30b05afb742d871084b02a12f007b40178384baee39dd1c689 not found: ID does not exist" containerID="a8c7aba19aa73b30b05afb742d871084b02a12f007b40178384baee39dd1c689" Oct 11 04:18:40 crc kubenswrapper[4798]: I1011 04:18:40.068815 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8c7aba19aa73b30b05afb742d871084b02a12f007b40178384baee39dd1c689"} err="failed to get container status \"a8c7aba19aa73b30b05afb742d871084b02a12f007b40178384baee39dd1c689\": rpc error: code = NotFound desc = could not find container \"a8c7aba19aa73b30b05afb742d871084b02a12f007b40178384baee39dd1c689\": container with ID starting with a8c7aba19aa73b30b05afb742d871084b02a12f007b40178384baee39dd1c689 not found: ID does not exist" Oct 11 04:18:40 crc kubenswrapper[4798]: I1011 04:18:40.068835 4798 scope.go:117] "RemoveContainer" containerID="3ab49639d5a71d102da366eef003e3d576da11b7b46c6522f586e4b0d7514464" Oct 11 04:18:40 crc kubenswrapper[4798]: E1011 04:18:40.069137 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ab49639d5a71d102da366eef003e3d576da11b7b46c6522f586e4b0d7514464\": container with ID starting with 3ab49639d5a71d102da366eef003e3d576da11b7b46c6522f586e4b0d7514464 not found: ID does not exist" containerID="3ab49639d5a71d102da366eef003e3d576da11b7b46c6522f586e4b0d7514464" Oct 11 04:18:40 crc kubenswrapper[4798]: I1011 04:18:40.069162 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ab49639d5a71d102da366eef003e3d576da11b7b46c6522f586e4b0d7514464"} err="failed to get container status \"3ab49639d5a71d102da366eef003e3d576da11b7b46c6522f586e4b0d7514464\": rpc error: code = NotFound desc = could not find container \"3ab49639d5a71d102da366eef003e3d576da11b7b46c6522f586e4b0d7514464\": container with ID starting with 3ab49639d5a71d102da366eef003e3d576da11b7b46c6522f586e4b0d7514464 not found: ID does not exist" Oct 11 04:18:40 crc kubenswrapper[4798]: I1011 04:18:40.069176 4798 scope.go:117] "RemoveContainer" containerID="d4c7a400cb9fedfee66d81f020c2d954e1f6597875e13135d0c85b8c51c7ed68" Oct 11 04:18:40 crc kubenswrapper[4798]: E1011 04:18:40.069453 4798 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d4c7a400cb9fedfee66d81f020c2d954e1f6597875e13135d0c85b8c51c7ed68\": container with ID starting with d4c7a400cb9fedfee66d81f020c2d954e1f6597875e13135d0c85b8c51c7ed68 not found: ID does not exist" containerID="d4c7a400cb9fedfee66d81f020c2d954e1f6597875e13135d0c85b8c51c7ed68" Oct 11 04:18:40 crc kubenswrapper[4798]: I1011 04:18:40.069485 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4c7a400cb9fedfee66d81f020c2d954e1f6597875e13135d0c85b8c51c7ed68"} err="failed to get container status \"d4c7a400cb9fedfee66d81f020c2d954e1f6597875e13135d0c85b8c51c7ed68\": rpc error: code = NotFound desc = could not find container \"d4c7a400cb9fedfee66d81f020c2d954e1f6597875e13135d0c85b8c51c7ed68\": container with ID starting with d4c7a400cb9fedfee66d81f020c2d954e1f6597875e13135d0c85b8c51c7ed68 not found: ID does not exist" Oct 11 04:18:41 crc kubenswrapper[4798]: I1011 04:18:41.440019 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c659e5f-e03e-4a66-9635-6378dd761663" path="/var/lib/kubelet/pods/2c659e5f-e03e-4a66-9635-6378dd761663/volumes" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.119184 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-r4zbt"] Oct 11 04:18:52 crc kubenswrapper[4798]: E1011 04:18:52.120077 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c659e5f-e03e-4a66-9635-6378dd761663" containerName="extract-utilities" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.120090 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c659e5f-e03e-4a66-9635-6378dd761663" containerName="extract-utilities" Oct 11 04:18:52 crc kubenswrapper[4798]: E1011 04:18:52.120105 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c659e5f-e03e-4a66-9635-6378dd761663" containerName="extract-content" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.120111 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c659e5f-e03e-4a66-9635-6378dd761663" containerName="extract-content" Oct 11 04:18:52 crc kubenswrapper[4798]: E1011 04:18:52.120122 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c659e5f-e03e-4a66-9635-6378dd761663" containerName="registry-server" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.120127 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c659e5f-e03e-4a66-9635-6378dd761663" containerName="registry-server" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.120297 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c659e5f-e03e-4a66-9635-6378dd761663" containerName="registry-server" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.121754 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.131363 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r4zbt"] Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.281261 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-utilities\") pod \"redhat-operators-r4zbt\" (UID: \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\") " pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.281951 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hclzk\" (UniqueName: \"kubernetes.io/projected/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-kube-api-access-hclzk\") pod \"redhat-operators-r4zbt\" (UID: \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\") " pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.282030 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-catalog-content\") pod \"redhat-operators-r4zbt\" (UID: \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\") " pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.384877 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-catalog-content\") pod \"redhat-operators-r4zbt\" (UID: \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\") " pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.385103 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-utilities\") pod \"redhat-operators-r4zbt\" (UID: \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\") " pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.385125 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hclzk\" (UniqueName: \"kubernetes.io/projected/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-kube-api-access-hclzk\") pod \"redhat-operators-r4zbt\" (UID: \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\") " pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.385373 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-catalog-content\") pod \"redhat-operators-r4zbt\" (UID: \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\") " pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.385834 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-utilities\") pod \"redhat-operators-r4zbt\" (UID: \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\") " pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.418667 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-hclzk\" (UniqueName: \"kubernetes.io/projected/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-kube-api-access-hclzk\") pod \"redhat-operators-r4zbt\" (UID: \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\") " pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.451141 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:18:52 crc kubenswrapper[4798]: I1011 04:18:52.890706 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r4zbt"] Oct 11 04:18:53 crc kubenswrapper[4798]: I1011 04:18:53.107634 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r4zbt" event={"ID":"4b54f72e-e8dc-4991-be77-fe72d69a0cb8","Type":"ContainerStarted","Data":"42517aa15023351dc1b0dc0b4d27df237dd91fcb3b9cfd0360438cbef128ce5f"} Oct 11 04:18:54 crc kubenswrapper[4798]: I1011 04:18:54.120863 4798 generic.go:334] "Generic (PLEG): container finished" podID="4b54f72e-e8dc-4991-be77-fe72d69a0cb8" containerID="8b777494f9ac6b46fa9e649ca87a25fde254c3e01fc44695b011efb64b62d207" exitCode=0 Oct 11 04:18:54 crc kubenswrapper[4798]: I1011 04:18:54.120925 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r4zbt" event={"ID":"4b54f72e-e8dc-4991-be77-fe72d69a0cb8","Type":"ContainerDied","Data":"8b777494f9ac6b46fa9e649ca87a25fde254c3e01fc44695b011efb64b62d207"} Oct 11 04:18:55 crc kubenswrapper[4798]: I1011 04:18:55.139408 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r4zbt" event={"ID":"4b54f72e-e8dc-4991-be77-fe72d69a0cb8","Type":"ContainerStarted","Data":"770590cc6a0a77c14539d83310edcff691200d126748eda186436ecb6441b7b6"} Oct 11 04:18:56 crc kubenswrapper[4798]: I1011 04:18:56.162722 4798 generic.go:334] "Generic (PLEG): container finished" podID="4b54f72e-e8dc-4991-be77-fe72d69a0cb8" containerID="770590cc6a0a77c14539d83310edcff691200d126748eda186436ecb6441b7b6" exitCode=0 Oct 11 04:18:56 crc kubenswrapper[4798]: I1011 04:18:56.163092 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r4zbt" event={"ID":"4b54f72e-e8dc-4991-be77-fe72d69a0cb8","Type":"ContainerDied","Data":"770590cc6a0a77c14539d83310edcff691200d126748eda186436ecb6441b7b6"} Oct 11 04:18:57 crc kubenswrapper[4798]: I1011 04:18:57.139227 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:18:57 crc kubenswrapper[4798]: I1011 04:18:57.140131 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:18:57 crc kubenswrapper[4798]: I1011 04:18:57.893694 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zwklz"] Oct 11 04:18:57 crc kubenswrapper[4798]: I1011 04:18:57.895460 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:18:57 crc kubenswrapper[4798]: I1011 04:18:57.907109 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zwklz"] Oct 11 04:18:57 crc kubenswrapper[4798]: I1011 04:18:57.994248 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/602730bb-ce0f-4773-9988-4b2db4032c10-catalog-content\") pod \"redhat-marketplace-zwklz\" (UID: \"602730bb-ce0f-4773-9988-4b2db4032c10\") " pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:18:57 crc kubenswrapper[4798]: I1011 04:18:57.994638 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/602730bb-ce0f-4773-9988-4b2db4032c10-utilities\") pod \"redhat-marketplace-zwklz\" (UID: \"602730bb-ce0f-4773-9988-4b2db4032c10\") " pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:18:57 crc kubenswrapper[4798]: I1011 04:18:57.994952 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjplg\" (UniqueName: \"kubernetes.io/projected/602730bb-ce0f-4773-9988-4b2db4032c10-kube-api-access-gjplg\") pod \"redhat-marketplace-zwklz\" (UID: \"602730bb-ce0f-4773-9988-4b2db4032c10\") " pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:18:58 crc kubenswrapper[4798]: I1011 04:18:58.096904 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/602730bb-ce0f-4773-9988-4b2db4032c10-utilities\") pod \"redhat-marketplace-zwklz\" (UID: \"602730bb-ce0f-4773-9988-4b2db4032c10\") " pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:18:58 crc kubenswrapper[4798]: I1011 04:18:58.096995 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjplg\" (UniqueName: \"kubernetes.io/projected/602730bb-ce0f-4773-9988-4b2db4032c10-kube-api-access-gjplg\") pod \"redhat-marketplace-zwklz\" (UID: \"602730bb-ce0f-4773-9988-4b2db4032c10\") " pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:18:58 crc kubenswrapper[4798]: I1011 04:18:58.097032 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/602730bb-ce0f-4773-9988-4b2db4032c10-catalog-content\") pod \"redhat-marketplace-zwklz\" (UID: \"602730bb-ce0f-4773-9988-4b2db4032c10\") " pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:18:58 crc kubenswrapper[4798]: I1011 04:18:58.097482 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/602730bb-ce0f-4773-9988-4b2db4032c10-utilities\") pod \"redhat-marketplace-zwklz\" (UID: \"602730bb-ce0f-4773-9988-4b2db4032c10\") " pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:18:58 crc kubenswrapper[4798]: I1011 04:18:58.097519 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/602730bb-ce0f-4773-9988-4b2db4032c10-catalog-content\") pod \"redhat-marketplace-zwklz\" (UID: \"602730bb-ce0f-4773-9988-4b2db4032c10\") " pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:18:58 crc kubenswrapper[4798]: I1011 04:18:58.120580 4798 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-gjplg\" (UniqueName: \"kubernetes.io/projected/602730bb-ce0f-4773-9988-4b2db4032c10-kube-api-access-gjplg\") pod \"redhat-marketplace-zwklz\" (UID: \"602730bb-ce0f-4773-9988-4b2db4032c10\") " pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:18:58 crc kubenswrapper[4798]: I1011 04:18:58.185956 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r4zbt" event={"ID":"4b54f72e-e8dc-4991-be77-fe72d69a0cb8","Type":"ContainerStarted","Data":"1a69778b5764708eae872fc81613ecdd3edd187c4477db132cbaaf7ae20848f3"} Oct 11 04:18:58 crc kubenswrapper[4798]: I1011 04:18:58.218351 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:18:58 crc kubenswrapper[4798]: I1011 04:18:58.696206 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zwklz"] Oct 11 04:18:58 crc kubenswrapper[4798]: W1011 04:18:58.705110 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod602730bb_ce0f_4773_9988_4b2db4032c10.slice/crio-02c47db7374833b63f6698a3aa3717d34b3f4a14dcaffb87897f9811a5333e63 WatchSource:0}: Error finding container 02c47db7374833b63f6698a3aa3717d34b3f4a14dcaffb87897f9811a5333e63: Status 404 returned error can't find the container with id 02c47db7374833b63f6698a3aa3717d34b3f4a14dcaffb87897f9811a5333e63 Oct 11 04:18:59 crc kubenswrapper[4798]: I1011 04:18:59.198039 4798 generic.go:334] "Generic (PLEG): container finished" podID="602730bb-ce0f-4773-9988-4b2db4032c10" containerID="823c2f1eeb519e1e6fc9918a579172a77499bfea6288b24129c808a89fb8d0b5" exitCode=0 Oct 11 04:18:59 crc kubenswrapper[4798]: I1011 04:18:59.198271 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zwklz" event={"ID":"602730bb-ce0f-4773-9988-4b2db4032c10","Type":"ContainerDied","Data":"823c2f1eeb519e1e6fc9918a579172a77499bfea6288b24129c808a89fb8d0b5"} Oct 11 04:18:59 crc kubenswrapper[4798]: I1011 04:18:59.198325 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zwklz" event={"ID":"602730bb-ce0f-4773-9988-4b2db4032c10","Type":"ContainerStarted","Data":"02c47db7374833b63f6698a3aa3717d34b3f4a14dcaffb87897f9811a5333e63"} Oct 11 04:18:59 crc kubenswrapper[4798]: I1011 04:18:59.259674 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-r4zbt" podStartSLOduration=4.705067596 podStartE2EDuration="7.259651182s" podCreationTimestamp="2025-10-11 04:18:52 +0000 UTC" firstStartedPulling="2025-10-11 04:18:54.124324043 +0000 UTC m=+1429.460613739" lastFinishedPulling="2025-10-11 04:18:56.678907629 +0000 UTC m=+1432.015197325" observedRunningTime="2025-10-11 04:18:59.248033424 +0000 UTC m=+1434.584323120" watchObservedRunningTime="2025-10-11 04:18:59.259651182 +0000 UTC m=+1434.595940868" Oct 11 04:19:00 crc kubenswrapper[4798]: I1011 04:19:00.209684 4798 generic.go:334] "Generic (PLEG): container finished" podID="602730bb-ce0f-4773-9988-4b2db4032c10" containerID="a4b309338570f9b38d6561ace44e199a2a52e58c786fc6f24ee10716a3781b96" exitCode=0 Oct 11 04:19:00 crc kubenswrapper[4798]: I1011 04:19:00.209762 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zwklz" 
event={"ID":"602730bb-ce0f-4773-9988-4b2db4032c10","Type":"ContainerDied","Data":"a4b309338570f9b38d6561ace44e199a2a52e58c786fc6f24ee10716a3781b96"} Oct 11 04:19:01 crc kubenswrapper[4798]: I1011 04:19:01.221094 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zwklz" event={"ID":"602730bb-ce0f-4773-9988-4b2db4032c10","Type":"ContainerStarted","Data":"d3276b94676d018cf356cb727fc9537f98dfc282a6ea6db9f104454c33e89186"} Oct 11 04:19:01 crc kubenswrapper[4798]: I1011 04:19:01.247302 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zwklz" podStartSLOduration=2.833061052 podStartE2EDuration="4.247283853s" podCreationTimestamp="2025-10-11 04:18:57 +0000 UTC" firstStartedPulling="2025-10-11 04:18:59.200908514 +0000 UTC m=+1434.537198240" lastFinishedPulling="2025-10-11 04:19:00.615131355 +0000 UTC m=+1435.951421041" observedRunningTime="2025-10-11 04:19:01.243592555 +0000 UTC m=+1436.579882261" watchObservedRunningTime="2025-10-11 04:19:01.247283853 +0000 UTC m=+1436.583573539" Oct 11 04:19:02 crc kubenswrapper[4798]: I1011 04:19:02.452214 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:19:02 crc kubenswrapper[4798]: I1011 04:19:02.452675 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:19:03 crc kubenswrapper[4798]: I1011 04:19:03.509419 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-r4zbt" podUID="4b54f72e-e8dc-4991-be77-fe72d69a0cb8" containerName="registry-server" probeResult="failure" output=< Oct 11 04:19:03 crc kubenswrapper[4798]: timeout: failed to connect service ":50051" within 1s Oct 11 04:19:03 crc kubenswrapper[4798]: > Oct 11 04:19:08 crc kubenswrapper[4798]: I1011 04:19:08.218559 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:19:08 crc kubenswrapper[4798]: I1011 04:19:08.219182 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:19:08 crc kubenswrapper[4798]: I1011 04:19:08.271383 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:19:08 crc kubenswrapper[4798]: I1011 04:19:08.335751 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:19:08 crc kubenswrapper[4798]: I1011 04:19:08.511800 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zwklz"] Oct 11 04:19:09 crc kubenswrapper[4798]: I1011 04:19:09.299152 4798 generic.go:334] "Generic (PLEG): container finished" podID="326c1bc8-1869-4ca2-8e8b-10356a3ca498" containerID="40d1cb237ac5be84bd9bbc89bc64afbb7c8b554afa9505468407a9a2f73465d2" exitCode=0 Oct 11 04:19:09 crc kubenswrapper[4798]: I1011 04:19:09.299272 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" event={"ID":"326c1bc8-1869-4ca2-8e8b-10356a3ca498","Type":"ContainerDied","Data":"40d1cb237ac5be84bd9bbc89bc64afbb7c8b554afa9505468407a9a2f73465d2"} Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.311926 4798 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/redhat-marketplace-zwklz" podUID="602730bb-ce0f-4773-9988-4b2db4032c10" containerName="registry-server" containerID="cri-o://d3276b94676d018cf356cb727fc9537f98dfc282a6ea6db9f104454c33e89186" gracePeriod=2 Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.783961 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.789515 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.874744 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjplg\" (UniqueName: \"kubernetes.io/projected/602730bb-ce0f-4773-9988-4b2db4032c10-kube-api-access-gjplg\") pod \"602730bb-ce0f-4773-9988-4b2db4032c10\" (UID: \"602730bb-ce0f-4773-9988-4b2db4032c10\") " Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.874811 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-bootstrap-combined-ca-bundle\") pod \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.874840 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-inventory\") pod \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.874859 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/602730bb-ce0f-4773-9988-4b2db4032c10-utilities\") pod \"602730bb-ce0f-4773-9988-4b2db4032c10\" (UID: \"602730bb-ce0f-4773-9988-4b2db4032c10\") " Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.874908 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnzhl\" (UniqueName: \"kubernetes.io/projected/326c1bc8-1869-4ca2-8e8b-10356a3ca498-kube-api-access-wnzhl\") pod \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.874967 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/602730bb-ce0f-4773-9988-4b2db4032c10-catalog-content\") pod \"602730bb-ce0f-4773-9988-4b2db4032c10\" (UID: \"602730bb-ce0f-4773-9988-4b2db4032c10\") " Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.874992 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-ssh-key\") pod \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\" (UID: \"326c1bc8-1869-4ca2-8e8b-10356a3ca498\") " Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.875929 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/602730bb-ce0f-4773-9988-4b2db4032c10-utilities" (OuterVolumeSpecName: "utilities") pod "602730bb-ce0f-4773-9988-4b2db4032c10" (UID: "602730bb-ce0f-4773-9988-4b2db4032c10"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.881179 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/326c1bc8-1869-4ca2-8e8b-10356a3ca498-kube-api-access-wnzhl" (OuterVolumeSpecName: "kube-api-access-wnzhl") pod "326c1bc8-1869-4ca2-8e8b-10356a3ca498" (UID: "326c1bc8-1869-4ca2-8e8b-10356a3ca498"). InnerVolumeSpecName "kube-api-access-wnzhl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.881763 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "326c1bc8-1869-4ca2-8e8b-10356a3ca498" (UID: "326c1bc8-1869-4ca2-8e8b-10356a3ca498"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.883570 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/602730bb-ce0f-4773-9988-4b2db4032c10-kube-api-access-gjplg" (OuterVolumeSpecName: "kube-api-access-gjplg") pod "602730bb-ce0f-4773-9988-4b2db4032c10" (UID: "602730bb-ce0f-4773-9988-4b2db4032c10"). InnerVolumeSpecName "kube-api-access-gjplg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.891103 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/602730bb-ce0f-4773-9988-4b2db4032c10-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "602730bb-ce0f-4773-9988-4b2db4032c10" (UID: "602730bb-ce0f-4773-9988-4b2db4032c10"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.902137 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-inventory" (OuterVolumeSpecName: "inventory") pod "326c1bc8-1869-4ca2-8e8b-10356a3ca498" (UID: "326c1bc8-1869-4ca2-8e8b-10356a3ca498"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.902607 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "326c1bc8-1869-4ca2-8e8b-10356a3ca498" (UID: "326c1bc8-1869-4ca2-8e8b-10356a3ca498"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.976655 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnzhl\" (UniqueName: \"kubernetes.io/projected/326c1bc8-1869-4ca2-8e8b-10356a3ca498-kube-api-access-wnzhl\") on node \"crc\" DevicePath \"\"" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.976695 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/602730bb-ce0f-4773-9988-4b2db4032c10-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.976706 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.976716 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjplg\" (UniqueName: \"kubernetes.io/projected/602730bb-ce0f-4773-9988-4b2db4032c10-kube-api-access-gjplg\") on node \"crc\" DevicePath \"\"" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.976725 4798 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.976733 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/326c1bc8-1869-4ca2-8e8b-10356a3ca498-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:19:10 crc kubenswrapper[4798]: I1011 04:19:10.976742 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/602730bb-ce0f-4773-9988-4b2db4032c10-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.326687 4798 generic.go:334] "Generic (PLEG): container finished" podID="602730bb-ce0f-4773-9988-4b2db4032c10" containerID="d3276b94676d018cf356cb727fc9537f98dfc282a6ea6db9f104454c33e89186" exitCode=0 Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.326776 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zwklz" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.326782 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zwklz" event={"ID":"602730bb-ce0f-4773-9988-4b2db4032c10","Type":"ContainerDied","Data":"d3276b94676d018cf356cb727fc9537f98dfc282a6ea6db9f104454c33e89186"} Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.327899 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zwklz" event={"ID":"602730bb-ce0f-4773-9988-4b2db4032c10","Type":"ContainerDied","Data":"02c47db7374833b63f6698a3aa3717d34b3f4a14dcaffb87897f9811a5333e63"} Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.327931 4798 scope.go:117] "RemoveContainer" containerID="d3276b94676d018cf356cb727fc9537f98dfc282a6ea6db9f104454c33e89186" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.331220 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" event={"ID":"326c1bc8-1869-4ca2-8e8b-10356a3ca498","Type":"ContainerDied","Data":"ba4207c4aa08e0320a3368e5e633b69e8f57e33fe4589e71bbfd1013c6088f23"} Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.331291 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba4207c4aa08e0320a3368e5e633b69e8f57e33fe4589e71bbfd1013c6088f23" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.331762 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.386153 4798 scope.go:117] "RemoveContainer" containerID="a4b309338570f9b38d6561ace44e199a2a52e58c786fc6f24ee10716a3781b96" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.405763 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zwklz"] Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.422170 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zwklz"] Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.434834 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="602730bb-ce0f-4773-9988-4b2db4032c10" path="/var/lib/kubelet/pods/602730bb-ce0f-4773-9988-4b2db4032c10/volumes" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.436101 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j"] Oct 11 04:19:11 crc kubenswrapper[4798]: E1011 04:19:11.438071 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="326c1bc8-1869-4ca2-8e8b-10356a3ca498" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.438176 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="326c1bc8-1869-4ca2-8e8b-10356a3ca498" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Oct 11 04:19:11 crc kubenswrapper[4798]: E1011 04:19:11.438263 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="602730bb-ce0f-4773-9988-4b2db4032c10" containerName="extract-content" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.438361 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="602730bb-ce0f-4773-9988-4b2db4032c10" containerName="extract-content" Oct 11 04:19:11 crc kubenswrapper[4798]: E1011 04:19:11.438470 4798 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="602730bb-ce0f-4773-9988-4b2db4032c10" containerName="registry-server" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.438526 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="602730bb-ce0f-4773-9988-4b2db4032c10" containerName="registry-server" Oct 11 04:19:11 crc kubenswrapper[4798]: E1011 04:19:11.438618 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="602730bb-ce0f-4773-9988-4b2db4032c10" containerName="extract-utilities" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.438677 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="602730bb-ce0f-4773-9988-4b2db4032c10" containerName="extract-utilities" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.438918 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="602730bb-ce0f-4773-9988-4b2db4032c10" containerName="registry-server" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.438997 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="326c1bc8-1869-4ca2-8e8b-10356a3ca498" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.439691 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.440101 4798 scope.go:117] "RemoveContainer" containerID="823c2f1eeb519e1e6fc9918a579172a77499bfea6288b24129c808a89fb8d0b5" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.443016 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.443330 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.443857 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.444237 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.444384 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j"] Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.502336 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0453dd0-3348-43e2-8489-681c44e089a1-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bth9j\" (UID: \"a0453dd0-3348-43e2-8489-681c44e089a1\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.502761 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxcfl\" (UniqueName: \"kubernetes.io/projected/a0453dd0-3348-43e2-8489-681c44e089a1-kube-api-access-qxcfl\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bth9j\" (UID: \"a0453dd0-3348-43e2-8489-681c44e089a1\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.502904 4798 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0453dd0-3348-43e2-8489-681c44e089a1-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bth9j\" (UID: \"a0453dd0-3348-43e2-8489-681c44e089a1\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.521559 4798 scope.go:117] "RemoveContainer" containerID="d3276b94676d018cf356cb727fc9537f98dfc282a6ea6db9f104454c33e89186" Oct 11 04:19:11 crc kubenswrapper[4798]: E1011 04:19:11.521927 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3276b94676d018cf356cb727fc9537f98dfc282a6ea6db9f104454c33e89186\": container with ID starting with d3276b94676d018cf356cb727fc9537f98dfc282a6ea6db9f104454c33e89186 not found: ID does not exist" containerID="d3276b94676d018cf356cb727fc9537f98dfc282a6ea6db9f104454c33e89186" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.521971 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3276b94676d018cf356cb727fc9537f98dfc282a6ea6db9f104454c33e89186"} err="failed to get container status \"d3276b94676d018cf356cb727fc9537f98dfc282a6ea6db9f104454c33e89186\": rpc error: code = NotFound desc = could not find container \"d3276b94676d018cf356cb727fc9537f98dfc282a6ea6db9f104454c33e89186\": container with ID starting with d3276b94676d018cf356cb727fc9537f98dfc282a6ea6db9f104454c33e89186 not found: ID does not exist" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.522001 4798 scope.go:117] "RemoveContainer" containerID="a4b309338570f9b38d6561ace44e199a2a52e58c786fc6f24ee10716a3781b96" Oct 11 04:19:11 crc kubenswrapper[4798]: E1011 04:19:11.522245 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4b309338570f9b38d6561ace44e199a2a52e58c786fc6f24ee10716a3781b96\": container with ID starting with a4b309338570f9b38d6561ace44e199a2a52e58c786fc6f24ee10716a3781b96 not found: ID does not exist" containerID="a4b309338570f9b38d6561ace44e199a2a52e58c786fc6f24ee10716a3781b96" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.522271 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4b309338570f9b38d6561ace44e199a2a52e58c786fc6f24ee10716a3781b96"} err="failed to get container status \"a4b309338570f9b38d6561ace44e199a2a52e58c786fc6f24ee10716a3781b96\": rpc error: code = NotFound desc = could not find container \"a4b309338570f9b38d6561ace44e199a2a52e58c786fc6f24ee10716a3781b96\": container with ID starting with a4b309338570f9b38d6561ace44e199a2a52e58c786fc6f24ee10716a3781b96 not found: ID does not exist" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.522285 4798 scope.go:117] "RemoveContainer" containerID="823c2f1eeb519e1e6fc9918a579172a77499bfea6288b24129c808a89fb8d0b5" Oct 11 04:19:11 crc kubenswrapper[4798]: E1011 04:19:11.522507 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"823c2f1eeb519e1e6fc9918a579172a77499bfea6288b24129c808a89fb8d0b5\": container with ID starting with 823c2f1eeb519e1e6fc9918a579172a77499bfea6288b24129c808a89fb8d0b5 not found: ID does not exist" containerID="823c2f1eeb519e1e6fc9918a579172a77499bfea6288b24129c808a89fb8d0b5" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.522529 4798 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"823c2f1eeb519e1e6fc9918a579172a77499bfea6288b24129c808a89fb8d0b5"} err="failed to get container status \"823c2f1eeb519e1e6fc9918a579172a77499bfea6288b24129c808a89fb8d0b5\": rpc error: code = NotFound desc = could not find container \"823c2f1eeb519e1e6fc9918a579172a77499bfea6288b24129c808a89fb8d0b5\": container with ID starting with 823c2f1eeb519e1e6fc9918a579172a77499bfea6288b24129c808a89fb8d0b5 not found: ID does not exist" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.604286 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0453dd0-3348-43e2-8489-681c44e089a1-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bth9j\" (UID: \"a0453dd0-3348-43e2-8489-681c44e089a1\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.604434 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxcfl\" (UniqueName: \"kubernetes.io/projected/a0453dd0-3348-43e2-8489-681c44e089a1-kube-api-access-qxcfl\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bth9j\" (UID: \"a0453dd0-3348-43e2-8489-681c44e089a1\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.604470 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0453dd0-3348-43e2-8489-681c44e089a1-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bth9j\" (UID: \"a0453dd0-3348-43e2-8489-681c44e089a1\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.608336 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0453dd0-3348-43e2-8489-681c44e089a1-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bth9j\" (UID: \"a0453dd0-3348-43e2-8489-681c44e089a1\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.608621 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0453dd0-3348-43e2-8489-681c44e089a1-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bth9j\" (UID: \"a0453dd0-3348-43e2-8489-681c44e089a1\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.620562 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxcfl\" (UniqueName: \"kubernetes.io/projected/a0453dd0-3348-43e2-8489-681c44e089a1-kube-api-access-qxcfl\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bth9j\" (UID: \"a0453dd0-3348-43e2-8489-681c44e089a1\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" Oct 11 04:19:11 crc kubenswrapper[4798]: I1011 04:19:11.817543 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.317188 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j"] Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.331114 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-78pxx"] Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.333217 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.352104 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-78pxx"] Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.356545 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" event={"ID":"a0453dd0-3348-43e2-8489-681c44e089a1","Type":"ContainerStarted","Data":"a51cd6be31e852253f3906df1f2b2bcf3b68b482811fc333c2abec80fb0df24f"} Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.420247 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/729e6e63-bc38-4068-a9a3-3ecac3079051-catalog-content\") pod \"community-operators-78pxx\" (UID: \"729e6e63-bc38-4068-a9a3-3ecac3079051\") " pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.420548 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/729e6e63-bc38-4068-a9a3-3ecac3079051-utilities\") pod \"community-operators-78pxx\" (UID: \"729e6e63-bc38-4068-a9a3-3ecac3079051\") " pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.420662 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxjg8\" (UniqueName: \"kubernetes.io/projected/729e6e63-bc38-4068-a9a3-3ecac3079051-kube-api-access-sxjg8\") pod \"community-operators-78pxx\" (UID: \"729e6e63-bc38-4068-a9a3-3ecac3079051\") " pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.510781 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.521854 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/729e6e63-bc38-4068-a9a3-3ecac3079051-catalog-content\") pod \"community-operators-78pxx\" (UID: \"729e6e63-bc38-4068-a9a3-3ecac3079051\") " pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.521979 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/729e6e63-bc38-4068-a9a3-3ecac3079051-utilities\") pod \"community-operators-78pxx\" (UID: \"729e6e63-bc38-4068-a9a3-3ecac3079051\") " pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.522302 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/729e6e63-bc38-4068-a9a3-3ecac3079051-catalog-content\") pod \"community-operators-78pxx\" (UID: \"729e6e63-bc38-4068-a9a3-3ecac3079051\") " pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.522470 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxjg8\" (UniqueName: \"kubernetes.io/projected/729e6e63-bc38-4068-a9a3-3ecac3079051-kube-api-access-sxjg8\") pod \"community-operators-78pxx\" (UID: \"729e6e63-bc38-4068-a9a3-3ecac3079051\") " pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.522820 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/729e6e63-bc38-4068-a9a3-3ecac3079051-utilities\") pod \"community-operators-78pxx\" (UID: \"729e6e63-bc38-4068-a9a3-3ecac3079051\") " pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.562571 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.563840 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxjg8\" (UniqueName: \"kubernetes.io/projected/729e6e63-bc38-4068-a9a3-3ecac3079051-kube-api-access-sxjg8\") pod \"community-operators-78pxx\" (UID: \"729e6e63-bc38-4068-a9a3-3ecac3079051\") " pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:12 crc kubenswrapper[4798]: I1011 04:19:12.669589 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:13 crc kubenswrapper[4798]: I1011 04:19:13.194603 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-78pxx"] Oct 11 04:19:13 crc kubenswrapper[4798]: W1011 04:19:13.204543 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod729e6e63_bc38_4068_a9a3_3ecac3079051.slice/crio-65a10345b0b88c1623cdca61eb3fa347c1d03a34914809a9b01b68b0f454ff3d WatchSource:0}: Error finding container 65a10345b0b88c1623cdca61eb3fa347c1d03a34914809a9b01b68b0f454ff3d: Status 404 returned error can't find the container with id 65a10345b0b88c1623cdca61eb3fa347c1d03a34914809a9b01b68b0f454ff3d Oct 11 04:19:13 crc kubenswrapper[4798]: I1011 04:19:13.366063 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" event={"ID":"a0453dd0-3348-43e2-8489-681c44e089a1","Type":"ContainerStarted","Data":"a1a6b43eace6f5289328a7e4d9afa14af45b217f64f08807ffa9d0292b95ecf0"} Oct 11 04:19:13 crc kubenswrapper[4798]: I1011 04:19:13.368445 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-78pxx" event={"ID":"729e6e63-bc38-4068-a9a3-3ecac3079051","Type":"ContainerStarted","Data":"185f7181f59e8a457be8755d8193eb952a41288dbe522c97dfd798ea917af725"} Oct 11 04:19:13 crc kubenswrapper[4798]: I1011 04:19:13.368489 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-78pxx" event={"ID":"729e6e63-bc38-4068-a9a3-3ecac3079051","Type":"ContainerStarted","Data":"65a10345b0b88c1623cdca61eb3fa347c1d03a34914809a9b01b68b0f454ff3d"} Oct 11 04:19:13 crc kubenswrapper[4798]: I1011 
04:19:13.386843 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" podStartSLOduration=1.982449748 podStartE2EDuration="2.386826705s" podCreationTimestamp="2025-10-11 04:19:11 +0000 UTC" firstStartedPulling="2025-10-11 04:19:12.314617555 +0000 UTC m=+1447.650907251" lastFinishedPulling="2025-10-11 04:19:12.718994522 +0000 UTC m=+1448.055284208" observedRunningTime="2025-10-11 04:19:13.380634186 +0000 UTC m=+1448.716923882" watchObservedRunningTime="2025-10-11 04:19:13.386826705 +0000 UTC m=+1448.723116391" Oct 11 04:19:14 crc kubenswrapper[4798]: I1011 04:19:14.384483 4798 generic.go:334] "Generic (PLEG): container finished" podID="729e6e63-bc38-4068-a9a3-3ecac3079051" containerID="185f7181f59e8a457be8755d8193eb952a41288dbe522c97dfd798ea917af725" exitCode=0 Oct 11 04:19:14 crc kubenswrapper[4798]: I1011 04:19:14.384562 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-78pxx" event={"ID":"729e6e63-bc38-4068-a9a3-3ecac3079051","Type":"ContainerDied","Data":"185f7181f59e8a457be8755d8193eb952a41288dbe522c97dfd798ea917af725"} Oct 11 04:19:15 crc kubenswrapper[4798]: I1011 04:19:15.709213 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r4zbt"] Oct 11 04:19:15 crc kubenswrapper[4798]: I1011 04:19:15.709863 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r4zbt" podUID="4b54f72e-e8dc-4991-be77-fe72d69a0cb8" containerName="registry-server" containerID="cri-o://1a69778b5764708eae872fc81613ecdd3edd187c4477db132cbaaf7ae20848f3" gracePeriod=2 Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.172468 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.300674 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-utilities\") pod \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\" (UID: \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\") " Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.300789 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hclzk\" (UniqueName: \"kubernetes.io/projected/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-kube-api-access-hclzk\") pod \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\" (UID: \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\") " Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.300828 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-catalog-content\") pod \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\" (UID: \"4b54f72e-e8dc-4991-be77-fe72d69a0cb8\") " Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.301815 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-utilities" (OuterVolumeSpecName: "utilities") pod "4b54f72e-e8dc-4991-be77-fe72d69a0cb8" (UID: "4b54f72e-e8dc-4991-be77-fe72d69a0cb8"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.307512 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-kube-api-access-hclzk" (OuterVolumeSpecName: "kube-api-access-hclzk") pod "4b54f72e-e8dc-4991-be77-fe72d69a0cb8" (UID: "4b54f72e-e8dc-4991-be77-fe72d69a0cb8"). InnerVolumeSpecName "kube-api-access-hclzk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.373144 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4b54f72e-e8dc-4991-be77-fe72d69a0cb8" (UID: "4b54f72e-e8dc-4991-be77-fe72d69a0cb8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.403811 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hclzk\" (UniqueName: \"kubernetes.io/projected/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-kube-api-access-hclzk\") on node \"crc\" DevicePath \"\"" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.403859 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.403878 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b54f72e-e8dc-4991-be77-fe72d69a0cb8-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.417002 4798 generic.go:334] "Generic (PLEG): container finished" podID="729e6e63-bc38-4068-a9a3-3ecac3079051" containerID="4250a9bb1be0cd67ef65b868da266dd21c026b939f4457e5017b7e58dd741704" exitCode=0 Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.417043 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-78pxx" event={"ID":"729e6e63-bc38-4068-a9a3-3ecac3079051","Type":"ContainerDied","Data":"4250a9bb1be0cd67ef65b868da266dd21c026b939f4457e5017b7e58dd741704"} Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.427588 4798 generic.go:334] "Generic (PLEG): container finished" podID="4b54f72e-e8dc-4991-be77-fe72d69a0cb8" containerID="1a69778b5764708eae872fc81613ecdd3edd187c4477db132cbaaf7ae20848f3" exitCode=0 Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.427637 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r4zbt" event={"ID":"4b54f72e-e8dc-4991-be77-fe72d69a0cb8","Type":"ContainerDied","Data":"1a69778b5764708eae872fc81613ecdd3edd187c4477db132cbaaf7ae20848f3"} Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.427705 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r4zbt" event={"ID":"4b54f72e-e8dc-4991-be77-fe72d69a0cb8","Type":"ContainerDied","Data":"42517aa15023351dc1b0dc0b4d27df237dd91fcb3b9cfd0360438cbef128ce5f"} Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.427728 4798 scope.go:117] "RemoveContainer" containerID="1a69778b5764708eae872fc81613ecdd3edd187c4477db132cbaaf7ae20848f3" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.427667 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-r4zbt" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.466499 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r4zbt"] Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.472275 4798 scope.go:117] "RemoveContainer" containerID="770590cc6a0a77c14539d83310edcff691200d126748eda186436ecb6441b7b6" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.474177 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-r4zbt"] Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.495275 4798 scope.go:117] "RemoveContainer" containerID="8b777494f9ac6b46fa9e649ca87a25fde254c3e01fc44695b011efb64b62d207" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.518186 4798 scope.go:117] "RemoveContainer" containerID="1a69778b5764708eae872fc81613ecdd3edd187c4477db132cbaaf7ae20848f3" Oct 11 04:19:16 crc kubenswrapper[4798]: E1011 04:19:16.518655 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a69778b5764708eae872fc81613ecdd3edd187c4477db132cbaaf7ae20848f3\": container with ID starting with 1a69778b5764708eae872fc81613ecdd3edd187c4477db132cbaaf7ae20848f3 not found: ID does not exist" containerID="1a69778b5764708eae872fc81613ecdd3edd187c4477db132cbaaf7ae20848f3" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.518704 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a69778b5764708eae872fc81613ecdd3edd187c4477db132cbaaf7ae20848f3"} err="failed to get container status \"1a69778b5764708eae872fc81613ecdd3edd187c4477db132cbaaf7ae20848f3\": rpc error: code = NotFound desc = could not find container \"1a69778b5764708eae872fc81613ecdd3edd187c4477db132cbaaf7ae20848f3\": container with ID starting with 1a69778b5764708eae872fc81613ecdd3edd187c4477db132cbaaf7ae20848f3 not found: ID does not exist" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.518724 4798 scope.go:117] "RemoveContainer" containerID="770590cc6a0a77c14539d83310edcff691200d126748eda186436ecb6441b7b6" Oct 11 04:19:16 crc kubenswrapper[4798]: E1011 04:19:16.519127 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"770590cc6a0a77c14539d83310edcff691200d126748eda186436ecb6441b7b6\": container with ID starting with 770590cc6a0a77c14539d83310edcff691200d126748eda186436ecb6441b7b6 not found: ID does not exist" containerID="770590cc6a0a77c14539d83310edcff691200d126748eda186436ecb6441b7b6" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.519170 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"770590cc6a0a77c14539d83310edcff691200d126748eda186436ecb6441b7b6"} err="failed to get container status \"770590cc6a0a77c14539d83310edcff691200d126748eda186436ecb6441b7b6\": rpc error: code = NotFound desc = could not find container \"770590cc6a0a77c14539d83310edcff691200d126748eda186436ecb6441b7b6\": container with ID starting with 770590cc6a0a77c14539d83310edcff691200d126748eda186436ecb6441b7b6 not found: ID does not exist" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.519182 4798 scope.go:117] "RemoveContainer" containerID="8b777494f9ac6b46fa9e649ca87a25fde254c3e01fc44695b011efb64b62d207" Oct 11 04:19:16 crc kubenswrapper[4798]: E1011 04:19:16.519466 4798 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"8b777494f9ac6b46fa9e649ca87a25fde254c3e01fc44695b011efb64b62d207\": container with ID starting with 8b777494f9ac6b46fa9e649ca87a25fde254c3e01fc44695b011efb64b62d207 not found: ID does not exist" containerID="8b777494f9ac6b46fa9e649ca87a25fde254c3e01fc44695b011efb64b62d207" Oct 11 04:19:16 crc kubenswrapper[4798]: I1011 04:19:16.519511 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b777494f9ac6b46fa9e649ca87a25fde254c3e01fc44695b011efb64b62d207"} err="failed to get container status \"8b777494f9ac6b46fa9e649ca87a25fde254c3e01fc44695b011efb64b62d207\": rpc error: code = NotFound desc = could not find container \"8b777494f9ac6b46fa9e649ca87a25fde254c3e01fc44695b011efb64b62d207\": container with ID starting with 8b777494f9ac6b46fa9e649ca87a25fde254c3e01fc44695b011efb64b62d207 not found: ID does not exist" Oct 11 04:19:17 crc kubenswrapper[4798]: I1011 04:19:17.434202 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b54f72e-e8dc-4991-be77-fe72d69a0cb8" path="/var/lib/kubelet/pods/4b54f72e-e8dc-4991-be77-fe72d69a0cb8/volumes" Oct 11 04:19:17 crc kubenswrapper[4798]: I1011 04:19:17.437752 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-78pxx" event={"ID":"729e6e63-bc38-4068-a9a3-3ecac3079051","Type":"ContainerStarted","Data":"3da9cff0bb4cd27bfe8b8bef607cda6453cb64d90a3001831e5eaf2de31e9a00"} Oct 11 04:19:17 crc kubenswrapper[4798]: I1011 04:19:17.462424 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-78pxx" podStartSLOduration=2.873943023 podStartE2EDuration="5.462373541s" podCreationTimestamp="2025-10-11 04:19:12 +0000 UTC" firstStartedPulling="2025-10-11 04:19:14.387310065 +0000 UTC m=+1449.723599761" lastFinishedPulling="2025-10-11 04:19:16.975740603 +0000 UTC m=+1452.312030279" observedRunningTime="2025-10-11 04:19:17.454416691 +0000 UTC m=+1452.790706407" watchObservedRunningTime="2025-10-11 04:19:17.462373541 +0000 UTC m=+1452.798663247" Oct 11 04:19:22 crc kubenswrapper[4798]: I1011 04:19:22.670654 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:22 crc kubenswrapper[4798]: I1011 04:19:22.673505 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:22 crc kubenswrapper[4798]: I1011 04:19:22.726803 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:23 crc kubenswrapper[4798]: I1011 04:19:23.564005 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:23 crc kubenswrapper[4798]: I1011 04:19:23.613787 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-78pxx"] Oct 11 04:19:25 crc kubenswrapper[4798]: I1011 04:19:25.526315 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-78pxx" podUID="729e6e63-bc38-4068-a9a3-3ecac3079051" containerName="registry-server" containerID="cri-o://3da9cff0bb4cd27bfe8b8bef607cda6453cb64d90a3001831e5eaf2de31e9a00" gracePeriod=2 Oct 11 04:19:25 crc kubenswrapper[4798]: I1011 04:19:25.987423 4798 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.091472 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/729e6e63-bc38-4068-a9a3-3ecac3079051-catalog-content\") pod \"729e6e63-bc38-4068-a9a3-3ecac3079051\" (UID: \"729e6e63-bc38-4068-a9a3-3ecac3079051\") " Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.091558 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sxjg8\" (UniqueName: \"kubernetes.io/projected/729e6e63-bc38-4068-a9a3-3ecac3079051-kube-api-access-sxjg8\") pod \"729e6e63-bc38-4068-a9a3-3ecac3079051\" (UID: \"729e6e63-bc38-4068-a9a3-3ecac3079051\") " Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.091626 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/729e6e63-bc38-4068-a9a3-3ecac3079051-utilities\") pod \"729e6e63-bc38-4068-a9a3-3ecac3079051\" (UID: \"729e6e63-bc38-4068-a9a3-3ecac3079051\") " Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.092631 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/729e6e63-bc38-4068-a9a3-3ecac3079051-utilities" (OuterVolumeSpecName: "utilities") pod "729e6e63-bc38-4068-a9a3-3ecac3079051" (UID: "729e6e63-bc38-4068-a9a3-3ecac3079051"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.097875 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/729e6e63-bc38-4068-a9a3-3ecac3079051-kube-api-access-sxjg8" (OuterVolumeSpecName: "kube-api-access-sxjg8") pod "729e6e63-bc38-4068-a9a3-3ecac3079051" (UID: "729e6e63-bc38-4068-a9a3-3ecac3079051"). InnerVolumeSpecName "kube-api-access-sxjg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.193440 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sxjg8\" (UniqueName: \"kubernetes.io/projected/729e6e63-bc38-4068-a9a3-3ecac3079051-kube-api-access-sxjg8\") on node \"crc\" DevicePath \"\"" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.193483 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/729e6e63-bc38-4068-a9a3-3ecac3079051-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.549000 4798 generic.go:334] "Generic (PLEG): container finished" podID="729e6e63-bc38-4068-a9a3-3ecac3079051" containerID="3da9cff0bb4cd27bfe8b8bef607cda6453cb64d90a3001831e5eaf2de31e9a00" exitCode=0 Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.549057 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-78pxx" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.549086 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-78pxx" event={"ID":"729e6e63-bc38-4068-a9a3-3ecac3079051","Type":"ContainerDied","Data":"3da9cff0bb4cd27bfe8b8bef607cda6453cb64d90a3001831e5eaf2de31e9a00"} Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.549229 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-78pxx" event={"ID":"729e6e63-bc38-4068-a9a3-3ecac3079051","Type":"ContainerDied","Data":"65a10345b0b88c1623cdca61eb3fa347c1d03a34914809a9b01b68b0f454ff3d"} Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.549281 4798 scope.go:117] "RemoveContainer" containerID="3da9cff0bb4cd27bfe8b8bef607cda6453cb64d90a3001831e5eaf2de31e9a00" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.572172 4798 scope.go:117] "RemoveContainer" containerID="4250a9bb1be0cd67ef65b868da266dd21c026b939f4457e5017b7e58dd741704" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.600058 4798 scope.go:117] "RemoveContainer" containerID="185f7181f59e8a457be8755d8193eb952a41288dbe522c97dfd798ea917af725" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.643072 4798 scope.go:117] "RemoveContainer" containerID="3da9cff0bb4cd27bfe8b8bef607cda6453cb64d90a3001831e5eaf2de31e9a00" Oct 11 04:19:26 crc kubenswrapper[4798]: E1011 04:19:26.643623 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3da9cff0bb4cd27bfe8b8bef607cda6453cb64d90a3001831e5eaf2de31e9a00\": container with ID starting with 3da9cff0bb4cd27bfe8b8bef607cda6453cb64d90a3001831e5eaf2de31e9a00 not found: ID does not exist" containerID="3da9cff0bb4cd27bfe8b8bef607cda6453cb64d90a3001831e5eaf2de31e9a00" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.643656 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3da9cff0bb4cd27bfe8b8bef607cda6453cb64d90a3001831e5eaf2de31e9a00"} err="failed to get container status \"3da9cff0bb4cd27bfe8b8bef607cda6453cb64d90a3001831e5eaf2de31e9a00\": rpc error: code = NotFound desc = could not find container \"3da9cff0bb4cd27bfe8b8bef607cda6453cb64d90a3001831e5eaf2de31e9a00\": container with ID starting with 3da9cff0bb4cd27bfe8b8bef607cda6453cb64d90a3001831e5eaf2de31e9a00 not found: ID does not exist" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.643680 4798 scope.go:117] "RemoveContainer" containerID="4250a9bb1be0cd67ef65b868da266dd21c026b939f4457e5017b7e58dd741704" Oct 11 04:19:26 crc kubenswrapper[4798]: E1011 04:19:26.644072 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4250a9bb1be0cd67ef65b868da266dd21c026b939f4457e5017b7e58dd741704\": container with ID starting with 4250a9bb1be0cd67ef65b868da266dd21c026b939f4457e5017b7e58dd741704 not found: ID does not exist" containerID="4250a9bb1be0cd67ef65b868da266dd21c026b939f4457e5017b7e58dd741704" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.644100 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4250a9bb1be0cd67ef65b868da266dd21c026b939f4457e5017b7e58dd741704"} err="failed to get container status \"4250a9bb1be0cd67ef65b868da266dd21c026b939f4457e5017b7e58dd741704\": rpc error: code = NotFound desc = could not find container 
\"4250a9bb1be0cd67ef65b868da266dd21c026b939f4457e5017b7e58dd741704\": container with ID starting with 4250a9bb1be0cd67ef65b868da266dd21c026b939f4457e5017b7e58dd741704 not found: ID does not exist" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.644116 4798 scope.go:117] "RemoveContainer" containerID="185f7181f59e8a457be8755d8193eb952a41288dbe522c97dfd798ea917af725" Oct 11 04:19:26 crc kubenswrapper[4798]: E1011 04:19:26.644360 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"185f7181f59e8a457be8755d8193eb952a41288dbe522c97dfd798ea917af725\": container with ID starting with 185f7181f59e8a457be8755d8193eb952a41288dbe522c97dfd798ea917af725 not found: ID does not exist" containerID="185f7181f59e8a457be8755d8193eb952a41288dbe522c97dfd798ea917af725" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.644381 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"185f7181f59e8a457be8755d8193eb952a41288dbe522c97dfd798ea917af725"} err="failed to get container status \"185f7181f59e8a457be8755d8193eb952a41288dbe522c97dfd798ea917af725\": rpc error: code = NotFound desc = could not find container \"185f7181f59e8a457be8755d8193eb952a41288dbe522c97dfd798ea917af725\": container with ID starting with 185f7181f59e8a457be8755d8193eb952a41288dbe522c97dfd798ea917af725 not found: ID does not exist" Oct 11 04:19:26 crc kubenswrapper[4798]: I1011 04:19:26.944789 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/729e6e63-bc38-4068-a9a3-3ecac3079051-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "729e6e63-bc38-4068-a9a3-3ecac3079051" (UID: "729e6e63-bc38-4068-a9a3-3ecac3079051"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:19:27 crc kubenswrapper[4798]: I1011 04:19:27.008383 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/729e6e63-bc38-4068-a9a3-3ecac3079051-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:19:27 crc kubenswrapper[4798]: I1011 04:19:27.138330 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:19:27 crc kubenswrapper[4798]: I1011 04:19:27.138705 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:19:27 crc kubenswrapper[4798]: I1011 04:19:27.190629 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-78pxx"] Oct 11 04:19:27 crc kubenswrapper[4798]: I1011 04:19:27.196904 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-78pxx"] Oct 11 04:19:27 crc kubenswrapper[4798]: I1011 04:19:27.432701 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="729e6e63-bc38-4068-a9a3-3ecac3079051" path="/var/lib/kubelet/pods/729e6e63-bc38-4068-a9a3-3ecac3079051/volumes" Oct 11 04:19:57 crc kubenswrapper[4798]: I1011 04:19:57.138604 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:19:57 crc kubenswrapper[4798]: I1011 04:19:57.139147 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:19:57 crc kubenswrapper[4798]: I1011 04:19:57.139197 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 04:19:57 crc kubenswrapper[4798]: I1011 04:19:57.140677 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 04:19:57 crc kubenswrapper[4798]: I1011 04:19:57.140729 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" gracePeriod=600 Oct 11 04:19:57 crc kubenswrapper[4798]: E1011 04:19:57.264055 4798 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:19:57 crc kubenswrapper[4798]: I1011 04:19:57.919065 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" exitCode=0 Oct 11 04:19:57 crc kubenswrapper[4798]: I1011 04:19:57.919108 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae"} Oct 11 04:19:57 crc kubenswrapper[4798]: I1011 04:19:57.919512 4798 scope.go:117] "RemoveContainer" containerID="0ede2098e77cd102846ac8838309a8f0eccb5d93df251b4af8b848dec6f5e092" Oct 11 04:19:57 crc kubenswrapper[4798]: I1011 04:19:57.920197 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:19:57 crc kubenswrapper[4798]: E1011 04:19:57.920584 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:20:11 crc kubenswrapper[4798]: I1011 04:20:11.426094 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:20:11 crc kubenswrapper[4798]: E1011 04:20:11.427241 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:20:25 crc kubenswrapper[4798]: I1011 04:20:25.242876 4798 generic.go:334] "Generic (PLEG): container finished" podID="a0453dd0-3348-43e2-8489-681c44e089a1" containerID="a1a6b43eace6f5289328a7e4d9afa14af45b217f64f08807ffa9d0292b95ecf0" exitCode=0 Oct 11 04:20:25 crc kubenswrapper[4798]: I1011 04:20:25.243078 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" event={"ID":"a0453dd0-3348-43e2-8489-681c44e089a1","Type":"ContainerDied","Data":"a1a6b43eace6f5289328a7e4d9afa14af45b217f64f08807ffa9d0292b95ecf0"} Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:26.423630 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:20:27 crc kubenswrapper[4798]: E1011 04:20:26.423880 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:26.638914 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:26.651842 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0453dd0-3348-43e2-8489-681c44e089a1-ssh-key\") pod \"a0453dd0-3348-43e2-8489-681c44e089a1\" (UID: \"a0453dd0-3348-43e2-8489-681c44e089a1\") " Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:26.652002 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0453dd0-3348-43e2-8489-681c44e089a1-inventory\") pod \"a0453dd0-3348-43e2-8489-681c44e089a1\" (UID: \"a0453dd0-3348-43e2-8489-681c44e089a1\") " Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:26.652083 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxcfl\" (UniqueName: \"kubernetes.io/projected/a0453dd0-3348-43e2-8489-681c44e089a1-kube-api-access-qxcfl\") pod \"a0453dd0-3348-43e2-8489-681c44e089a1\" (UID: \"a0453dd0-3348-43e2-8489-681c44e089a1\") " Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:26.710658 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0453dd0-3348-43e2-8489-681c44e089a1-kube-api-access-qxcfl" (OuterVolumeSpecName: "kube-api-access-qxcfl") pod "a0453dd0-3348-43e2-8489-681c44e089a1" (UID: "a0453dd0-3348-43e2-8489-681c44e089a1"). InnerVolumeSpecName "kube-api-access-qxcfl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:26.731133 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0453dd0-3348-43e2-8489-681c44e089a1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a0453dd0-3348-43e2-8489-681c44e089a1" (UID: "a0453dd0-3348-43e2-8489-681c44e089a1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:26.733634 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0453dd0-3348-43e2-8489-681c44e089a1-inventory" (OuterVolumeSpecName: "inventory") pod "a0453dd0-3348-43e2-8489-681c44e089a1" (UID: "a0453dd0-3348-43e2-8489-681c44e089a1"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:26.758243 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0453dd0-3348-43e2-8489-681c44e089a1-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:26.758265 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxcfl\" (UniqueName: \"kubernetes.io/projected/a0453dd0-3348-43e2-8489-681c44e089a1-kube-api-access-qxcfl\") on node \"crc\" DevicePath \"\"" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:26.758274 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0453dd0-3348-43e2-8489-681c44e089a1-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.263410 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" event={"ID":"a0453dd0-3348-43e2-8489-681c44e089a1","Type":"ContainerDied","Data":"a51cd6be31e852253f3906df1f2b2bcf3b68b482811fc333c2abec80fb0df24f"} Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.263727 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a51cd6be31e852253f3906df1f2b2bcf3b68b482811fc333c2abec80fb0df24f" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.263462 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.375742 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2"] Oct 11 04:20:27 crc kubenswrapper[4798]: E1011 04:20:27.376252 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b54f72e-e8dc-4991-be77-fe72d69a0cb8" containerName="extract-content" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.376274 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b54f72e-e8dc-4991-be77-fe72d69a0cb8" containerName="extract-content" Oct 11 04:20:27 crc kubenswrapper[4798]: E1011 04:20:27.376288 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="729e6e63-bc38-4068-a9a3-3ecac3079051" containerName="extract-content" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.376296 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="729e6e63-bc38-4068-a9a3-3ecac3079051" containerName="extract-content" Oct 11 04:20:27 crc kubenswrapper[4798]: E1011 04:20:27.376310 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="729e6e63-bc38-4068-a9a3-3ecac3079051" containerName="extract-utilities" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.376319 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="729e6e63-bc38-4068-a9a3-3ecac3079051" containerName="extract-utilities" Oct 11 04:20:27 crc kubenswrapper[4798]: E1011 04:20:27.376339 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b54f72e-e8dc-4991-be77-fe72d69a0cb8" containerName="registry-server" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.376347 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b54f72e-e8dc-4991-be77-fe72d69a0cb8" containerName="registry-server" Oct 11 04:20:27 crc kubenswrapper[4798]: E1011 04:20:27.376364 4798 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="729e6e63-bc38-4068-a9a3-3ecac3079051" containerName="registry-server" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.376373 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="729e6e63-bc38-4068-a9a3-3ecac3079051" containerName="registry-server" Oct 11 04:20:27 crc kubenswrapper[4798]: E1011 04:20:27.376409 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0453dd0-3348-43e2-8489-681c44e089a1" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.376419 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0453dd0-3348-43e2-8489-681c44e089a1" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Oct 11 04:20:27 crc kubenswrapper[4798]: E1011 04:20:27.376436 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b54f72e-e8dc-4991-be77-fe72d69a0cb8" containerName="extract-utilities" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.376444 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b54f72e-e8dc-4991-be77-fe72d69a0cb8" containerName="extract-utilities" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.376650 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="729e6e63-bc38-4068-a9a3-3ecac3079051" containerName="registry-server" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.376678 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b54f72e-e8dc-4991-be77-fe72d69a0cb8" containerName="registry-server" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.376696 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0453dd0-3348-43e2-8489-681c44e089a1" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.377615 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.383738 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.384062 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.384195 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.384301 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.392477 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2"] Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.479451 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2\" (UID: \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.479540 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2\" (UID: \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.479738 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p744k\" (UniqueName: \"kubernetes.io/projected/8575ef22-7f1e-45b4-aeee-8ece74bb904f-kube-api-access-p744k\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2\" (UID: \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.580455 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p744k\" (UniqueName: \"kubernetes.io/projected/8575ef22-7f1e-45b4-aeee-8ece74bb904f-kube-api-access-p744k\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2\" (UID: \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.580554 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2\" (UID: \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.580592 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-inventory\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2\" (UID: \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.585194 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2\" (UID: \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.589990 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2\" (UID: \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.596527 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p744k\" (UniqueName: \"kubernetes.io/projected/8575ef22-7f1e-45b4-aeee-8ece74bb904f-kube-api-access-p744k\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2\" (UID: \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" Oct 11 04:20:27 crc kubenswrapper[4798]: I1011 04:20:27.708879 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" Oct 11 04:20:28 crc kubenswrapper[4798]: I1011 04:20:28.248172 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2"] Oct 11 04:20:28 crc kubenswrapper[4798]: I1011 04:20:28.276133 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" event={"ID":"8575ef22-7f1e-45b4-aeee-8ece74bb904f","Type":"ContainerStarted","Data":"48c30f2874d75e520765b5697d4a7f5baf5249a86b1398ec337d936e6e3357f2"} Oct 11 04:20:29 crc kubenswrapper[4798]: I1011 04:20:29.285982 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" event={"ID":"8575ef22-7f1e-45b4-aeee-8ece74bb904f","Type":"ContainerStarted","Data":"93817d4d2ebb874672fa2a7ba370655f4907b48c493dd836a8db7a2b00160f52"} Oct 11 04:20:29 crc kubenswrapper[4798]: I1011 04:20:29.307945 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" podStartSLOduration=1.879920833 podStartE2EDuration="2.307925867s" podCreationTimestamp="2025-10-11 04:20:27 +0000 UTC" firstStartedPulling="2025-10-11 04:20:28.252080008 +0000 UTC m=+1523.588369694" lastFinishedPulling="2025-10-11 04:20:28.680085042 +0000 UTC m=+1524.016374728" observedRunningTime="2025-10-11 04:20:29.300890849 +0000 UTC m=+1524.637180555" watchObservedRunningTime="2025-10-11 04:20:29.307925867 +0000 UTC m=+1524.644215553" Oct 11 04:20:34 crc kubenswrapper[4798]: I1011 04:20:34.339205 4798 generic.go:334] "Generic (PLEG): container finished" podID="8575ef22-7f1e-45b4-aeee-8ece74bb904f" containerID="93817d4d2ebb874672fa2a7ba370655f4907b48c493dd836a8db7a2b00160f52" exitCode=0 Oct 11 04:20:34 crc kubenswrapper[4798]: I1011 
04:20:34.339299 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" event={"ID":"8575ef22-7f1e-45b4-aeee-8ece74bb904f","Type":"ContainerDied","Data":"93817d4d2ebb874672fa2a7ba370655f4907b48c493dd836a8db7a2b00160f52"} Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.049098 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-l79jb"] Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.060506 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-2lxjs"] Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.073728 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-h2g26"] Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.082556 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-2lxjs"] Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.090936 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-l79jb"] Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.097897 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-h2g26"] Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.438947 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4264e4c3-e92c-4335-948f-565d22a7ba21" path="/var/lib/kubelet/pods/4264e4c3-e92c-4335-948f-565d22a7ba21/volumes" Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.440966 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92a46d10-49a4-46d5-8344-5d3d910eb5ca" path="/var/lib/kubelet/pods/92a46d10-49a4-46d5-8344-5d3d910eb5ca/volumes" Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.442242 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="facccfd6-9383-4336-be2d-1f56b6f11d25" path="/var/lib/kubelet/pods/facccfd6-9383-4336-be2d-1f56b6f11d25/volumes" Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.840736 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.982755 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-inventory\") pod \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\" (UID: \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\") " Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.983326 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-ssh-key\") pod \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\" (UID: \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\") " Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.983352 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p744k\" (UniqueName: \"kubernetes.io/projected/8575ef22-7f1e-45b4-aeee-8ece74bb904f-kube-api-access-p744k\") pod \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\" (UID: \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\") " Oct 11 04:20:35 crc kubenswrapper[4798]: I1011 04:20:35.988054 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8575ef22-7f1e-45b4-aeee-8ece74bb904f-kube-api-access-p744k" (OuterVolumeSpecName: "kube-api-access-p744k") pod "8575ef22-7f1e-45b4-aeee-8ece74bb904f" (UID: "8575ef22-7f1e-45b4-aeee-8ece74bb904f"). InnerVolumeSpecName "kube-api-access-p744k". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:20:36 crc kubenswrapper[4798]: E1011 04:20:36.004368 4798 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-ssh-key podName:8575ef22-7f1e-45b4-aeee-8ece74bb904f nodeName:}" failed. No retries permitted until 2025-10-11 04:20:36.504338398 +0000 UTC m=+1531.840628084 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "ssh-key" (UniqueName: "kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-ssh-key") pod "8575ef22-7f1e-45b4-aeee-8ece74bb904f" (UID: "8575ef22-7f1e-45b4-aeee-8ece74bb904f") : error deleting /var/lib/kubelet/pods/8575ef22-7f1e-45b4-aeee-8ece74bb904f/volume-subpaths: remove /var/lib/kubelet/pods/8575ef22-7f1e-45b4-aeee-8ece74bb904f/volume-subpaths: no such file or directory Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.006757 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-inventory" (OuterVolumeSpecName: "inventory") pod "8575ef22-7f1e-45b4-aeee-8ece74bb904f" (UID: "8575ef22-7f1e-45b4-aeee-8ece74bb904f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.085405 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p744k\" (UniqueName: \"kubernetes.io/projected/8575ef22-7f1e-45b4-aeee-8ece74bb904f-kube-api-access-p744k\") on node \"crc\" DevicePath \"\"" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.085442 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.384816 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" event={"ID":"8575ef22-7f1e-45b4-aeee-8ece74bb904f","Type":"ContainerDied","Data":"48c30f2874d75e520765b5697d4a7f5baf5249a86b1398ec337d936e6e3357f2"} Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.384872 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="48c30f2874d75e520765b5697d4a7f5baf5249a86b1398ec337d936e6e3357f2" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.384963 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.434717 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r"] Oct 11 04:20:36 crc kubenswrapper[4798]: E1011 04:20:36.435337 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8575ef22-7f1e-45b4-aeee-8ece74bb904f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.435369 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="8575ef22-7f1e-45b4-aeee-8ece74bb904f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.435777 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="8575ef22-7f1e-45b4-aeee-8ece74bb904f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.436822 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.446519 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r"] Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.595151 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-ssh-key\") pod \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\" (UID: \"8575ef22-7f1e-45b4-aeee-8ece74bb904f\") " Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.595893 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9342f970-0bdf-41b9-aa43-763845d72472-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7rd8r\" (UID: \"9342f970-0bdf-41b9-aa43-763845d72472\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.595960 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xsfj\" (UniqueName: \"kubernetes.io/projected/9342f970-0bdf-41b9-aa43-763845d72472-kube-api-access-5xsfj\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7rd8r\" (UID: \"9342f970-0bdf-41b9-aa43-763845d72472\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.596132 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9342f970-0bdf-41b9-aa43-763845d72472-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7rd8r\" (UID: \"9342f970-0bdf-41b9-aa43-763845d72472\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.606732 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8575ef22-7f1e-45b4-aeee-8ece74bb904f" (UID: "8575ef22-7f1e-45b4-aeee-8ece74bb904f"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.697063 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9342f970-0bdf-41b9-aa43-763845d72472-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7rd8r\" (UID: \"9342f970-0bdf-41b9-aa43-763845d72472\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.697173 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9342f970-0bdf-41b9-aa43-763845d72472-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7rd8r\" (UID: \"9342f970-0bdf-41b9-aa43-763845d72472\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.697201 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xsfj\" (UniqueName: \"kubernetes.io/projected/9342f970-0bdf-41b9-aa43-763845d72472-kube-api-access-5xsfj\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7rd8r\" (UID: \"9342f970-0bdf-41b9-aa43-763845d72472\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.697278 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8575ef22-7f1e-45b4-aeee-8ece74bb904f-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.701981 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9342f970-0bdf-41b9-aa43-763845d72472-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7rd8r\" (UID: \"9342f970-0bdf-41b9-aa43-763845d72472\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.703080 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9342f970-0bdf-41b9-aa43-763845d72472-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7rd8r\" (UID: \"9342f970-0bdf-41b9-aa43-763845d72472\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.720940 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xsfj\" (UniqueName: \"kubernetes.io/projected/9342f970-0bdf-41b9-aa43-763845d72472-kube-api-access-5xsfj\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7rd8r\" (UID: \"9342f970-0bdf-41b9-aa43-763845d72472\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" Oct 11 04:20:36 crc kubenswrapper[4798]: I1011 04:20:36.754986 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" Oct 11 04:20:37 crc kubenswrapper[4798]: I1011 04:20:37.312381 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r"] Oct 11 04:20:37 crc kubenswrapper[4798]: W1011 04:20:37.326592 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9342f970_0bdf_41b9_aa43_763845d72472.slice/crio-6cc59928aa9bd11515c5241ddff0a92d01992a159d35a0dca95dfd0b80604dde WatchSource:0}: Error finding container 6cc59928aa9bd11515c5241ddff0a92d01992a159d35a0dca95dfd0b80604dde: Status 404 returned error can't find the container with id 6cc59928aa9bd11515c5241ddff0a92d01992a159d35a0dca95dfd0b80604dde Oct 11 04:20:37 crc kubenswrapper[4798]: I1011 04:20:37.399229 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" event={"ID":"9342f970-0bdf-41b9-aa43-763845d72472","Type":"ContainerStarted","Data":"6cc59928aa9bd11515c5241ddff0a92d01992a159d35a0dca95dfd0b80604dde"} Oct 11 04:20:38 crc kubenswrapper[4798]: I1011 04:20:38.411276 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" event={"ID":"9342f970-0bdf-41b9-aa43-763845d72472","Type":"ContainerStarted","Data":"7188573e76de3d9f65011bd8f18f171df4c220831714fa9bc31d8f4d9d46a525"} Oct 11 04:20:38 crc kubenswrapper[4798]: I1011 04:20:38.445795 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" podStartSLOduration=2.018907936 podStartE2EDuration="2.445767051s" podCreationTimestamp="2025-10-11 04:20:36 +0000 UTC" firstStartedPulling="2025-10-11 04:20:37.33181297 +0000 UTC m=+1532.668102656" lastFinishedPulling="2025-10-11 04:20:37.758672085 +0000 UTC m=+1533.094961771" observedRunningTime="2025-10-11 04:20:38.432520064 +0000 UTC m=+1533.768809760" watchObservedRunningTime="2025-10-11 04:20:38.445767051 +0000 UTC m=+1533.782056747" Oct 11 04:20:40 crc kubenswrapper[4798]: I1011 04:20:40.424219 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:20:40 crc kubenswrapper[4798]: E1011 04:20:40.424886 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:20:43 crc kubenswrapper[4798]: I1011 04:20:43.063152 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-d706-account-create-sb7b5"] Oct 11 04:20:43 crc kubenswrapper[4798]: I1011 04:20:43.075510 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-d706-account-create-sb7b5"] Oct 11 04:20:43 crc kubenswrapper[4798]: I1011 04:20:43.437602 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23d21fa0-4e91-45e1-b694-8fc3d526b0e8" path="/var/lib/kubelet/pods/23d21fa0-4e91-45e1-b694-8fc3d526b0e8/volumes" Oct 11 04:20:44 crc kubenswrapper[4798]: I1011 04:20:44.037939 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/glance-e33a-account-create-b6g6r"] Oct 11 04:20:44 crc kubenswrapper[4798]: I1011 04:20:44.054414 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-e33a-account-create-b6g6r"] Oct 11 04:20:45 crc kubenswrapper[4798]: I1011 04:20:45.438190 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b194c956-398d-4e4d-9ada-7ac9a0323720" path="/var/lib/kubelet/pods/b194c956-398d-4e4d-9ada-7ac9a0323720/volumes" Oct 11 04:20:51 crc kubenswrapper[4798]: I1011 04:20:51.424111 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:20:51 crc kubenswrapper[4798]: E1011 04:20:51.425113 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:21:00 crc kubenswrapper[4798]: I1011 04:21:00.065000 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-3b28-account-create-7w992"] Oct 11 04:21:00 crc kubenswrapper[4798]: I1011 04:21:00.084512 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-3b28-account-create-7w992"] Oct 11 04:21:01 crc kubenswrapper[4798]: I1011 04:21:01.436018 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c998cfd-5665-44b4-865c-122eaa9effac" path="/var/lib/kubelet/pods/1c998cfd-5665-44b4-865c-122eaa9effac/volumes" Oct 11 04:21:02 crc kubenswrapper[4798]: I1011 04:21:02.423852 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:21:02 crc kubenswrapper[4798]: E1011 04:21:02.424332 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:21:10 crc kubenswrapper[4798]: I1011 04:21:10.025646 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-tlsp9"] Oct 11 04:21:10 crc kubenswrapper[4798]: I1011 04:21:10.034071 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-tlsp9"] Oct 11 04:21:11 crc kubenswrapper[4798]: I1011 04:21:11.031715 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-rpgmd"] Oct 11 04:21:11 crc kubenswrapper[4798]: I1011 04:21:11.040732 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-s7bg6"] Oct 11 04:21:11 crc kubenswrapper[4798]: I1011 04:21:11.048854 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-s7bg6"] Oct 11 04:21:11 crc kubenswrapper[4798]: I1011 04:21:11.056742 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-rpgmd"] Oct 11 04:21:11 crc kubenswrapper[4798]: I1011 04:21:11.433820 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00f6ca11-f294-4a81-8acd-d5fe3c5547fd" 
path="/var/lib/kubelet/pods/00f6ca11-f294-4a81-8acd-d5fe3c5547fd/volumes" Oct 11 04:21:11 crc kubenswrapper[4798]: I1011 04:21:11.434412 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5cba417d-7df3-4129-80f7-29514fd805b2" path="/var/lib/kubelet/pods/5cba417d-7df3-4129-80f7-29514fd805b2/volumes" Oct 11 04:21:11 crc kubenswrapper[4798]: I1011 04:21:11.434931 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad" path="/var/lib/kubelet/pods/916c6d8b-b83d-4f53-8bfd-7e8c8edbb2ad/volumes" Oct 11 04:21:14 crc kubenswrapper[4798]: I1011 04:21:14.041261 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-rbwr7"] Oct 11 04:21:14 crc kubenswrapper[4798]: I1011 04:21:14.057338 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-rbwr7"] Oct 11 04:21:14 crc kubenswrapper[4798]: I1011 04:21:14.424869 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:21:14 crc kubenswrapper[4798]: E1011 04:21:14.425377 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:21:15 crc kubenswrapper[4798]: I1011 04:21:15.436373 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15bea8df-46e9-43fc-9aee-1a303682485c" path="/var/lib/kubelet/pods/15bea8df-46e9-43fc-9aee-1a303682485c/volumes" Oct 11 04:21:18 crc kubenswrapper[4798]: I1011 04:21:18.031530 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-fqtt8"] Oct 11 04:21:18 crc kubenswrapper[4798]: I1011 04:21:18.044700 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-fqtt8"] Oct 11 04:21:19 crc kubenswrapper[4798]: I1011 04:21:19.147351 4798 scope.go:117] "RemoveContainer" containerID="335f06c3c29669acc45bb6c719107e7af3f4a3b2766268cec4a2cc3963526b43" Oct 11 04:21:19 crc kubenswrapper[4798]: I1011 04:21:19.184035 4798 scope.go:117] "RemoveContainer" containerID="0b2f0cecf81461c70c1e403529c71c36026975d11053b8ff269926c43ceb906d" Oct 11 04:21:19 crc kubenswrapper[4798]: I1011 04:21:19.236883 4798 scope.go:117] "RemoveContainer" containerID="1112ef84fa4ce584316d1bf971ebaffd58b29e3d065518fe162acd42e5805d47" Oct 11 04:21:19 crc kubenswrapper[4798]: I1011 04:21:19.294457 4798 scope.go:117] "RemoveContainer" containerID="4ceedb7600d54728d2a4a22a17156a98f8281a97f5bebe41efa9f8c115b486be" Oct 11 04:21:19 crc kubenswrapper[4798]: I1011 04:21:19.336615 4798 scope.go:117] "RemoveContainer" containerID="4d1f27351d234df3025876454d6a4374ca0041c9421ca38c98902eddc7053ac9" Oct 11 04:21:19 crc kubenswrapper[4798]: I1011 04:21:19.368058 4798 scope.go:117] "RemoveContainer" containerID="cc6b6ff6d9199dcfcff6d49580de3753d5fd1a5c6ff619f25bda77015544b2d2" Oct 11 04:21:19 crc kubenswrapper[4798]: I1011 04:21:19.422534 4798 scope.go:117] "RemoveContainer" containerID="36ea9392bc0f636943ef337d2bfca61b4e8a9c22b9ee8ef3e09b321121467d41" Oct 11 04:21:19 crc kubenswrapper[4798]: I1011 04:21:19.443596 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="6554e121-a137-45e1-83c2-f24b987099ea" path="/var/lib/kubelet/pods/6554e121-a137-45e1-83c2-f24b987099ea/volumes" Oct 11 04:21:19 crc kubenswrapper[4798]: I1011 04:21:19.452227 4798 scope.go:117] "RemoveContainer" containerID="a87b49e0a5ccb29297c80c6c43b764b62064414063987839fb791f90b874c281" Oct 11 04:21:19 crc kubenswrapper[4798]: I1011 04:21:19.511444 4798 scope.go:117] "RemoveContainer" containerID="66c42d5e14e026c76f83a4fd403d4d01e4ff70049715c794e92df73a16356279" Oct 11 04:21:19 crc kubenswrapper[4798]: I1011 04:21:19.543195 4798 scope.go:117] "RemoveContainer" containerID="4d04877a44dcc808f21667a8f3a3ad15f2784e8d8a7764f30c95b48d3117dcf5" Oct 11 04:21:19 crc kubenswrapper[4798]: I1011 04:21:19.802307 4798 generic.go:334] "Generic (PLEG): container finished" podID="9342f970-0bdf-41b9-aa43-763845d72472" containerID="7188573e76de3d9f65011bd8f18f171df4c220831714fa9bc31d8f4d9d46a525" exitCode=0 Oct 11 04:21:19 crc kubenswrapper[4798]: I1011 04:21:19.802346 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" event={"ID":"9342f970-0bdf-41b9-aa43-763845d72472","Type":"ContainerDied","Data":"7188573e76de3d9f65011bd8f18f171df4c220831714fa9bc31d8f4d9d46a525"} Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.048294 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-4c2c-account-create-7gnkg"] Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.059064 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-646e-account-create-c766h"] Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.069880 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-646e-account-create-c766h"] Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.078750 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-4c2c-account-create-7gnkg"] Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.181219 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.315737 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9342f970-0bdf-41b9-aa43-763845d72472-ssh-key\") pod \"9342f970-0bdf-41b9-aa43-763845d72472\" (UID: \"9342f970-0bdf-41b9-aa43-763845d72472\") " Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.315887 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9342f970-0bdf-41b9-aa43-763845d72472-inventory\") pod \"9342f970-0bdf-41b9-aa43-763845d72472\" (UID: \"9342f970-0bdf-41b9-aa43-763845d72472\") " Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.315962 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xsfj\" (UniqueName: \"kubernetes.io/projected/9342f970-0bdf-41b9-aa43-763845d72472-kube-api-access-5xsfj\") pod \"9342f970-0bdf-41b9-aa43-763845d72472\" (UID: \"9342f970-0bdf-41b9-aa43-763845d72472\") " Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.323091 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9342f970-0bdf-41b9-aa43-763845d72472-kube-api-access-5xsfj" (OuterVolumeSpecName: "kube-api-access-5xsfj") pod "9342f970-0bdf-41b9-aa43-763845d72472" (UID: "9342f970-0bdf-41b9-aa43-763845d72472"). 
InnerVolumeSpecName "kube-api-access-5xsfj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.343489 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9342f970-0bdf-41b9-aa43-763845d72472-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9342f970-0bdf-41b9-aa43-763845d72472" (UID: "9342f970-0bdf-41b9-aa43-763845d72472"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.365766 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9342f970-0bdf-41b9-aa43-763845d72472-inventory" (OuterVolumeSpecName: "inventory") pod "9342f970-0bdf-41b9-aa43-763845d72472" (UID: "9342f970-0bdf-41b9-aa43-763845d72472"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.418279 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9342f970-0bdf-41b9-aa43-763845d72472-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.418314 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xsfj\" (UniqueName: \"kubernetes.io/projected/9342f970-0bdf-41b9-aa43-763845d72472-kube-api-access-5xsfj\") on node \"crc\" DevicePath \"\"" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.418328 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9342f970-0bdf-41b9-aa43-763845d72472-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.435646 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08c72420-5d00-49d3-aabb-527a1e9c22fb" path="/var/lib/kubelet/pods/08c72420-5d00-49d3-aabb-527a1e9c22fb/volumes" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.436581 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af88f788-8ce4-4cf3-b570-4cc85140cb48" path="/var/lib/kubelet/pods/af88f788-8ce4-4cf3-b570-4cc85140cb48/volumes" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.831590 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" event={"ID":"9342f970-0bdf-41b9-aa43-763845d72472","Type":"ContainerDied","Data":"6cc59928aa9bd11515c5241ddff0a92d01992a159d35a0dca95dfd0b80604dde"} Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.831637 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6cc59928aa9bd11515c5241ddff0a92d01992a159d35a0dca95dfd0b80604dde" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.831695 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.961994 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7"] Oct 11 04:21:21 crc kubenswrapper[4798]: E1011 04:21:21.962404 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9342f970-0bdf-41b9-aa43-763845d72472" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.962422 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9342f970-0bdf-41b9-aa43-763845d72472" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.962595 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="9342f970-0bdf-41b9-aa43-763845d72472" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.963191 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.966091 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.966430 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.966899 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.967709 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:21:21 crc kubenswrapper[4798]: I1011 04:21:21.971855 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7"] Oct 11 04:21:22 crc kubenswrapper[4798]: I1011 04:21:22.135009 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd3df659-3786-42ad-8ebb-00b3a6102640-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7\" (UID: \"bd3df659-3786-42ad-8ebb-00b3a6102640\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" Oct 11 04:21:22 crc kubenswrapper[4798]: I1011 04:21:22.135230 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5cgz\" (UniqueName: \"kubernetes.io/projected/bd3df659-3786-42ad-8ebb-00b3a6102640-kube-api-access-l5cgz\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7\" (UID: \"bd3df659-3786-42ad-8ebb-00b3a6102640\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" Oct 11 04:21:22 crc kubenswrapper[4798]: I1011 04:21:22.135463 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd3df659-3786-42ad-8ebb-00b3a6102640-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7\" (UID: \"bd3df659-3786-42ad-8ebb-00b3a6102640\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" Oct 11 04:21:22 crc kubenswrapper[4798]: I1011 04:21:22.236907 4798 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd3df659-3786-42ad-8ebb-00b3a6102640-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7\" (UID: \"bd3df659-3786-42ad-8ebb-00b3a6102640\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" Oct 11 04:21:22 crc kubenswrapper[4798]: I1011 04:21:22.236999 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5cgz\" (UniqueName: \"kubernetes.io/projected/bd3df659-3786-42ad-8ebb-00b3a6102640-kube-api-access-l5cgz\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7\" (UID: \"bd3df659-3786-42ad-8ebb-00b3a6102640\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" Oct 11 04:21:22 crc kubenswrapper[4798]: I1011 04:21:22.237067 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd3df659-3786-42ad-8ebb-00b3a6102640-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7\" (UID: \"bd3df659-3786-42ad-8ebb-00b3a6102640\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" Oct 11 04:21:22 crc kubenswrapper[4798]: I1011 04:21:22.245340 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd3df659-3786-42ad-8ebb-00b3a6102640-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7\" (UID: \"bd3df659-3786-42ad-8ebb-00b3a6102640\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" Oct 11 04:21:22 crc kubenswrapper[4798]: I1011 04:21:22.251120 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd3df659-3786-42ad-8ebb-00b3a6102640-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7\" (UID: \"bd3df659-3786-42ad-8ebb-00b3a6102640\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" Oct 11 04:21:22 crc kubenswrapper[4798]: I1011 04:21:22.264437 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5cgz\" (UniqueName: \"kubernetes.io/projected/bd3df659-3786-42ad-8ebb-00b3a6102640-kube-api-access-l5cgz\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7\" (UID: \"bd3df659-3786-42ad-8ebb-00b3a6102640\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" Oct 11 04:21:22 crc kubenswrapper[4798]: I1011 04:21:22.285350 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" Oct 11 04:21:22 crc kubenswrapper[4798]: I1011 04:21:22.602747 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7"] Oct 11 04:21:22 crc kubenswrapper[4798]: I1011 04:21:22.606800 4798 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 04:21:22 crc kubenswrapper[4798]: I1011 04:21:22.842983 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" event={"ID":"bd3df659-3786-42ad-8ebb-00b3a6102640","Type":"ContainerStarted","Data":"a001f09c5705041e27bb992a876021190d02c65805400956d059307dbf3bd5ce"} Oct 11 04:21:23 crc kubenswrapper[4798]: I1011 04:21:23.852701 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" event={"ID":"bd3df659-3786-42ad-8ebb-00b3a6102640","Type":"ContainerStarted","Data":"f207351effe69a438689177ef964cc494a99b6d0c1e4a2fca9d0672064364f93"} Oct 11 04:21:23 crc kubenswrapper[4798]: I1011 04:21:23.883420 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" podStartSLOduration=2.404906859 podStartE2EDuration="2.883377952s" podCreationTimestamp="2025-10-11 04:21:21 +0000 UTC" firstStartedPulling="2025-10-11 04:21:22.606516224 +0000 UTC m=+1577.942805920" lastFinishedPulling="2025-10-11 04:21:23.084987317 +0000 UTC m=+1578.421277013" observedRunningTime="2025-10-11 04:21:23.872166463 +0000 UTC m=+1579.208456169" watchObservedRunningTime="2025-10-11 04:21:23.883377952 +0000 UTC m=+1579.219667658" Oct 11 04:21:27 crc kubenswrapper[4798]: I1011 04:21:27.920138 4798 generic.go:334] "Generic (PLEG): container finished" podID="bd3df659-3786-42ad-8ebb-00b3a6102640" containerID="f207351effe69a438689177ef964cc494a99b6d0c1e4a2fca9d0672064364f93" exitCode=0 Oct 11 04:21:27 crc kubenswrapper[4798]: I1011 04:21:27.920233 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" event={"ID":"bd3df659-3786-42ad-8ebb-00b3a6102640","Type":"ContainerDied","Data":"f207351effe69a438689177ef964cc494a99b6d0c1e4a2fca9d0672064364f93"} Oct 11 04:21:28 crc kubenswrapper[4798]: I1011 04:21:28.423899 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:21:28 crc kubenswrapper[4798]: E1011 04:21:28.424132 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.290313 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.470695 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5cgz\" (UniqueName: \"kubernetes.io/projected/bd3df659-3786-42ad-8ebb-00b3a6102640-kube-api-access-l5cgz\") pod \"bd3df659-3786-42ad-8ebb-00b3a6102640\" (UID: \"bd3df659-3786-42ad-8ebb-00b3a6102640\") " Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.470876 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd3df659-3786-42ad-8ebb-00b3a6102640-inventory\") pod \"bd3df659-3786-42ad-8ebb-00b3a6102640\" (UID: \"bd3df659-3786-42ad-8ebb-00b3a6102640\") " Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.470937 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd3df659-3786-42ad-8ebb-00b3a6102640-ssh-key\") pod \"bd3df659-3786-42ad-8ebb-00b3a6102640\" (UID: \"bd3df659-3786-42ad-8ebb-00b3a6102640\") " Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.487155 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd3df659-3786-42ad-8ebb-00b3a6102640-kube-api-access-l5cgz" (OuterVolumeSpecName: "kube-api-access-l5cgz") pod "bd3df659-3786-42ad-8ebb-00b3a6102640" (UID: "bd3df659-3786-42ad-8ebb-00b3a6102640"). InnerVolumeSpecName "kube-api-access-l5cgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.501504 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd3df659-3786-42ad-8ebb-00b3a6102640-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bd3df659-3786-42ad-8ebb-00b3a6102640" (UID: "bd3df659-3786-42ad-8ebb-00b3a6102640"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.511066 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd3df659-3786-42ad-8ebb-00b3a6102640-inventory" (OuterVolumeSpecName: "inventory") pod "bd3df659-3786-42ad-8ebb-00b3a6102640" (UID: "bd3df659-3786-42ad-8ebb-00b3a6102640"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.572496 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd3df659-3786-42ad-8ebb-00b3a6102640-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.572526 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd3df659-3786-42ad-8ebb-00b3a6102640-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.572537 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l5cgz\" (UniqueName: \"kubernetes.io/projected/bd3df659-3786-42ad-8ebb-00b3a6102640-kube-api-access-l5cgz\") on node \"crc\" DevicePath \"\"" Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.941633 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" event={"ID":"bd3df659-3786-42ad-8ebb-00b3a6102640","Type":"ContainerDied","Data":"a001f09c5705041e27bb992a876021190d02c65805400956d059307dbf3bd5ce"} Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.941861 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a001f09c5705041e27bb992a876021190d02c65805400956d059307dbf3bd5ce" Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.941732 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7" Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.999049 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb"] Oct 11 04:21:29 crc kubenswrapper[4798]: E1011 04:21:29.999423 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd3df659-3786-42ad-8ebb-00b3a6102640" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.999440 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd3df659-3786-42ad-8ebb-00b3a6102640" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Oct 11 04:21:29 crc kubenswrapper[4798]: I1011 04:21:29.999606 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd3df659-3786-42ad-8ebb-00b3a6102640" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.000156 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.004211 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.004900 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.005554 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.005845 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.015305 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb"] Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.185728 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b0f1aed4-3351-4dc5-8642-e580307cdb53-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-g57xb\" (UID: \"b0f1aed4-3351-4dc5-8642-e580307cdb53\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.186370 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b0f1aed4-3351-4dc5-8642-e580307cdb53-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-g57xb\" (UID: \"b0f1aed4-3351-4dc5-8642-e580307cdb53\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.186537 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbgdz\" (UniqueName: \"kubernetes.io/projected/b0f1aed4-3351-4dc5-8642-e580307cdb53-kube-api-access-gbgdz\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-g57xb\" (UID: \"b0f1aed4-3351-4dc5-8642-e580307cdb53\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.288528 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b0f1aed4-3351-4dc5-8642-e580307cdb53-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-g57xb\" (UID: \"b0f1aed4-3351-4dc5-8642-e580307cdb53\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.288601 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbgdz\" (UniqueName: \"kubernetes.io/projected/b0f1aed4-3351-4dc5-8642-e580307cdb53-kube-api-access-gbgdz\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-g57xb\" (UID: \"b0f1aed4-3351-4dc5-8642-e580307cdb53\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.288665 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b0f1aed4-3351-4dc5-8642-e580307cdb53-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-g57xb\" 
(UID: \"b0f1aed4-3351-4dc5-8642-e580307cdb53\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.293358 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b0f1aed4-3351-4dc5-8642-e580307cdb53-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-g57xb\" (UID: \"b0f1aed4-3351-4dc5-8642-e580307cdb53\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.293472 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b0f1aed4-3351-4dc5-8642-e580307cdb53-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-g57xb\" (UID: \"b0f1aed4-3351-4dc5-8642-e580307cdb53\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.307774 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbgdz\" (UniqueName: \"kubernetes.io/projected/b0f1aed4-3351-4dc5-8642-e580307cdb53-kube-api-access-gbgdz\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-g57xb\" (UID: \"b0f1aed4-3351-4dc5-8642-e580307cdb53\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.319547 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.888265 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb"] Oct 11 04:21:30 crc kubenswrapper[4798]: I1011 04:21:30.951860 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" event={"ID":"b0f1aed4-3351-4dc5-8642-e580307cdb53","Type":"ContainerStarted","Data":"cb6175af08b4a8d58e702eed117df83c17f5d1a1a34a1163d9e900b92478f1d7"} Oct 11 04:21:31 crc kubenswrapper[4798]: I1011 04:21:31.960497 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" event={"ID":"b0f1aed4-3351-4dc5-8642-e580307cdb53","Type":"ContainerStarted","Data":"9e4450dc66f6c28607d124753046d846ce33c520e4b6c6ea4985f9ad1cebc521"} Oct 11 04:21:31 crc kubenswrapper[4798]: I1011 04:21:31.985452 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" podStartSLOduration=2.572444876 podStartE2EDuration="2.985435979s" podCreationTimestamp="2025-10-11 04:21:29 +0000 UTC" firstStartedPulling="2025-10-11 04:21:30.90157785 +0000 UTC m=+1586.237867546" lastFinishedPulling="2025-10-11 04:21:31.314568923 +0000 UTC m=+1586.650858649" observedRunningTime="2025-10-11 04:21:31.979883986 +0000 UTC m=+1587.316173692" watchObservedRunningTime="2025-10-11 04:21:31.985435979 +0000 UTC m=+1587.321725665" Oct 11 04:21:33 crc kubenswrapper[4798]: I1011 04:21:33.044256 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-pjqvn"] Oct 11 04:21:33 crc kubenswrapper[4798]: I1011 04:21:33.051333 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-01e2-account-create-d9hs4"] Oct 11 04:21:33 crc kubenswrapper[4798]: I1011 04:21:33.059322 4798 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-pjqvn"] Oct 11 04:21:33 crc kubenswrapper[4798]: I1011 04:21:33.065751 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-01e2-account-create-d9hs4"] Oct 11 04:21:33 crc kubenswrapper[4798]: I1011 04:21:33.434757 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd" path="/var/lib/kubelet/pods/00f2a7d2-ced9-4a97-97d9-e002e8b0ebbd/volumes" Oct 11 04:21:33 crc kubenswrapper[4798]: I1011 04:21:33.435301 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dea5f749-0ae2-4385-b9fd-675a6d651cd8" path="/var/lib/kubelet/pods/dea5f749-0ae2-4385-b9fd-675a6d651cd8/volumes" Oct 11 04:21:39 crc kubenswrapper[4798]: I1011 04:21:39.423852 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:21:39 crc kubenswrapper[4798]: E1011 04:21:39.424921 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:21:44 crc kubenswrapper[4798]: I1011 04:21:44.035602 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-phfnr"] Oct 11 04:21:44 crc kubenswrapper[4798]: I1011 04:21:44.050030 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-phfnr"] Oct 11 04:21:45 crc kubenswrapper[4798]: I1011 04:21:45.444352 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d696f8bc-027f-43c0-b58d-72ea3f3f9c5c" path="/var/lib/kubelet/pods/d696f8bc-027f-43c0-b58d-72ea3f3f9c5c/volumes" Oct 11 04:21:46 crc kubenswrapper[4798]: I1011 04:21:46.029018 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-2ltmb"] Oct 11 04:21:46 crc kubenswrapper[4798]: I1011 04:21:46.037941 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-2ltmb"] Oct 11 04:21:47 crc kubenswrapper[4798]: I1011 04:21:47.434023 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79172d81-dfa6-4863-afbe-1e8c8b622f6d" path="/var/lib/kubelet/pods/79172d81-dfa6-4863-afbe-1e8c8b622f6d/volumes" Oct 11 04:21:54 crc kubenswrapper[4798]: I1011 04:21:54.423479 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:21:54 crc kubenswrapper[4798]: E1011 04:21:54.424849 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:22:09 crc kubenswrapper[4798]: I1011 04:22:09.423231 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:22:09 crc kubenswrapper[4798]: E1011 04:22:09.423970 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:22:10 crc kubenswrapper[4798]: I1011 04:22:10.045490 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-btwq2"] Oct 11 04:22:10 crc kubenswrapper[4798]: I1011 04:22:10.061501 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-btwq2"] Oct 11 04:22:11 crc kubenswrapper[4798]: I1011 04:22:11.441619 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7e46251-4b37-40c6-adbc-877857e4442b" path="/var/lib/kubelet/pods/c7e46251-4b37-40c6-adbc-877857e4442b/volumes" Oct 11 04:22:13 crc kubenswrapper[4798]: I1011 04:22:13.039798 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-f5wn5"] Oct 11 04:22:13 crc kubenswrapper[4798]: I1011 04:22:13.061246 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-f5wn5"] Oct 11 04:22:13 crc kubenswrapper[4798]: I1011 04:22:13.433731 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1afeba46-c2cc-455e-ab80-56f553c9e4cb" path="/var/lib/kubelet/pods/1afeba46-c2cc-455e-ab80-56f553c9e4cb/volumes" Oct 11 04:22:19 crc kubenswrapper[4798]: I1011 04:22:19.758115 4798 scope.go:117] "RemoveContainer" containerID="fba64869f56c8567efd4fa21cf654a04b3cbd1bfe2f2ecb58411fa2ae429aec9" Oct 11 04:22:19 crc kubenswrapper[4798]: I1011 04:22:19.804819 4798 scope.go:117] "RemoveContainer" containerID="5b05062cc436b6c50869a63e442b96759ab9057beac4469a2039e02cb3aeddd4" Oct 11 04:22:19 crc kubenswrapper[4798]: I1011 04:22:19.868342 4798 scope.go:117] "RemoveContainer" containerID="d7b534cde5c58a64218ca1e17bf4d02a4350cb00ef91cce26941c9cdb2660a63" Oct 11 04:22:19 crc kubenswrapper[4798]: I1011 04:22:19.902193 4798 scope.go:117] "RemoveContainer" containerID="26420071971f8dbe0fb85a3a7c12df46a1f5fb44fb1420e0ed0622a421d484da" Oct 11 04:22:19 crc kubenswrapper[4798]: I1011 04:22:19.948484 4798 scope.go:117] "RemoveContainer" containerID="7e4683a342af7fb86273eb4c55f7d8fc13cd540c7a4d7a8535f881cb6e4fc355" Oct 11 04:22:19 crc kubenswrapper[4798]: I1011 04:22:19.997486 4798 scope.go:117] "RemoveContainer" containerID="7197dc6f8173a46b362dedb05e974a07ce4ae4188dc3c4a11df371869b498fbc" Oct 11 04:22:20 crc kubenswrapper[4798]: I1011 04:22:20.046553 4798 scope.go:117] "RemoveContainer" containerID="bf1751f30a0b27a52a548cb5acab60ae68ed52dad84d483157ab0932d7862973" Oct 11 04:22:20 crc kubenswrapper[4798]: I1011 04:22:20.067444 4798 scope.go:117] "RemoveContainer" containerID="31f5f9c766ed1d6c74a4ec0cb1db8809ceeb6ed936246a20fc889987020aec7c" Oct 11 04:22:20 crc kubenswrapper[4798]: I1011 04:22:20.088266 4798 scope.go:117] "RemoveContainer" containerID="030525f9e7fdc8065aa8247288ea651fa2f72ac181e7e395bc50305c695da2f2" Oct 11 04:22:23 crc kubenswrapper[4798]: I1011 04:22:23.424245 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:22:23 crc kubenswrapper[4798]: E1011 04:22:23.425020 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:22:29 crc kubenswrapper[4798]: I1011 04:22:29.504067 4798 generic.go:334] "Generic (PLEG): container finished" podID="b0f1aed4-3351-4dc5-8642-e580307cdb53" containerID="9e4450dc66f6c28607d124753046d846ce33c520e4b6c6ea4985f9ad1cebc521" exitCode=2 Oct 11 04:22:29 crc kubenswrapper[4798]: I1011 04:22:29.504152 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" event={"ID":"b0f1aed4-3351-4dc5-8642-e580307cdb53","Type":"ContainerDied","Data":"9e4450dc66f6c28607d124753046d846ce33c520e4b6c6ea4985f9ad1cebc521"} Oct 11 04:22:30 crc kubenswrapper[4798]: I1011 04:22:30.937276 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" Oct 11 04:22:31 crc kubenswrapper[4798]: I1011 04:22:31.090521 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbgdz\" (UniqueName: \"kubernetes.io/projected/b0f1aed4-3351-4dc5-8642-e580307cdb53-kube-api-access-gbgdz\") pod \"b0f1aed4-3351-4dc5-8642-e580307cdb53\" (UID: \"b0f1aed4-3351-4dc5-8642-e580307cdb53\") " Oct 11 04:22:31 crc kubenswrapper[4798]: I1011 04:22:31.090949 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b0f1aed4-3351-4dc5-8642-e580307cdb53-inventory\") pod \"b0f1aed4-3351-4dc5-8642-e580307cdb53\" (UID: \"b0f1aed4-3351-4dc5-8642-e580307cdb53\") " Oct 11 04:22:31 crc kubenswrapper[4798]: I1011 04:22:31.091028 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b0f1aed4-3351-4dc5-8642-e580307cdb53-ssh-key\") pod \"b0f1aed4-3351-4dc5-8642-e580307cdb53\" (UID: \"b0f1aed4-3351-4dc5-8642-e580307cdb53\") " Oct 11 04:22:31 crc kubenswrapper[4798]: I1011 04:22:31.097102 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0f1aed4-3351-4dc5-8642-e580307cdb53-kube-api-access-gbgdz" (OuterVolumeSpecName: "kube-api-access-gbgdz") pod "b0f1aed4-3351-4dc5-8642-e580307cdb53" (UID: "b0f1aed4-3351-4dc5-8642-e580307cdb53"). InnerVolumeSpecName "kube-api-access-gbgdz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:22:31 crc kubenswrapper[4798]: I1011 04:22:31.116659 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0f1aed4-3351-4dc5-8642-e580307cdb53-inventory" (OuterVolumeSpecName: "inventory") pod "b0f1aed4-3351-4dc5-8642-e580307cdb53" (UID: "b0f1aed4-3351-4dc5-8642-e580307cdb53"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:22:31 crc kubenswrapper[4798]: I1011 04:22:31.118859 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0f1aed4-3351-4dc5-8642-e580307cdb53-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b0f1aed4-3351-4dc5-8642-e580307cdb53" (UID: "b0f1aed4-3351-4dc5-8642-e580307cdb53"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:22:31 crc kubenswrapper[4798]: I1011 04:22:31.193941 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b0f1aed4-3351-4dc5-8642-e580307cdb53-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:22:31 crc kubenswrapper[4798]: I1011 04:22:31.194003 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b0f1aed4-3351-4dc5-8642-e580307cdb53-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:22:31 crc kubenswrapper[4798]: I1011 04:22:31.194012 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbgdz\" (UniqueName: \"kubernetes.io/projected/b0f1aed4-3351-4dc5-8642-e580307cdb53-kube-api-access-gbgdz\") on node \"crc\" DevicePath \"\"" Oct 11 04:22:31 crc kubenswrapper[4798]: I1011 04:22:31.523262 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" event={"ID":"b0f1aed4-3351-4dc5-8642-e580307cdb53","Type":"ContainerDied","Data":"cb6175af08b4a8d58e702eed117df83c17f5d1a1a34a1163d9e900b92478f1d7"} Oct 11 04:22:31 crc kubenswrapper[4798]: I1011 04:22:31.523600 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb6175af08b4a8d58e702eed117df83c17f5d1a1a34a1163d9e900b92478f1d7" Oct 11 04:22:31 crc kubenswrapper[4798]: I1011 04:22:31.523474 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb" Oct 11 04:22:34 crc kubenswrapper[4798]: I1011 04:22:34.423170 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:22:34 crc kubenswrapper[4798]: E1011 04:22:34.423734 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.034849 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282"] Oct 11 04:22:38 crc kubenswrapper[4798]: E1011 04:22:38.036080 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0f1aed4-3351-4dc5-8642-e580307cdb53" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.036096 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0f1aed4-3351-4dc5-8642-e580307cdb53" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.036286 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0f1aed4-3351-4dc5-8642-e580307cdb53" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.036994 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.044705 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.045435 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.045598 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.045753 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.053681 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282"] Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.149035 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/80751ff0-128a-4b49-8eb3-a3fb93afd239-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cd282\" (UID: \"80751ff0-128a-4b49-8eb3-a3fb93afd239\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.149212 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kqzx\" (UniqueName: \"kubernetes.io/projected/80751ff0-128a-4b49-8eb3-a3fb93afd239-kube-api-access-4kqzx\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cd282\" (UID: \"80751ff0-128a-4b49-8eb3-a3fb93afd239\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.149258 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/80751ff0-128a-4b49-8eb3-a3fb93afd239-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cd282\" (UID: \"80751ff0-128a-4b49-8eb3-a3fb93afd239\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.251290 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/80751ff0-128a-4b49-8eb3-a3fb93afd239-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cd282\" (UID: \"80751ff0-128a-4b49-8eb3-a3fb93afd239\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.251526 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/80751ff0-128a-4b49-8eb3-a3fb93afd239-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cd282\" (UID: \"80751ff0-128a-4b49-8eb3-a3fb93afd239\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.251718 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kqzx\" (UniqueName: \"kubernetes.io/projected/80751ff0-128a-4b49-8eb3-a3fb93afd239-kube-api-access-4kqzx\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cd282\" 
(UID: \"80751ff0-128a-4b49-8eb3-a3fb93afd239\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.259173 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/80751ff0-128a-4b49-8eb3-a3fb93afd239-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cd282\" (UID: \"80751ff0-128a-4b49-8eb3-a3fb93afd239\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.262756 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/80751ff0-128a-4b49-8eb3-a3fb93afd239-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cd282\" (UID: \"80751ff0-128a-4b49-8eb3-a3fb93afd239\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.271224 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kqzx\" (UniqueName: \"kubernetes.io/projected/80751ff0-128a-4b49-8eb3-a3fb93afd239-kube-api-access-4kqzx\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cd282\" (UID: \"80751ff0-128a-4b49-8eb3-a3fb93afd239\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.400001 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" Oct 11 04:22:38 crc kubenswrapper[4798]: I1011 04:22:38.971644 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282"] Oct 11 04:22:39 crc kubenswrapper[4798]: I1011 04:22:39.604875 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" event={"ID":"80751ff0-128a-4b49-8eb3-a3fb93afd239","Type":"ContainerStarted","Data":"7d911e7fa3bb5ea42c5b549615a700a5fb6891eee63acb33fa79c421e3c6c541"} Oct 11 04:22:40 crc kubenswrapper[4798]: I1011 04:22:40.614373 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" event={"ID":"80751ff0-128a-4b49-8eb3-a3fb93afd239","Type":"ContainerStarted","Data":"caa9d29e42c40384d44151d80a7dcda6cc89eade70ff41e804b0fc0f7296030a"} Oct 11 04:22:46 crc kubenswrapper[4798]: I1011 04:22:46.423956 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:22:46 crc kubenswrapper[4798]: E1011 04:22:46.424585 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:22:51 crc kubenswrapper[4798]: I1011 04:22:51.055207 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" podStartSLOduration=12.411734039 podStartE2EDuration="13.055183671s" podCreationTimestamp="2025-10-11 04:22:38 +0000 UTC" firstStartedPulling="2025-10-11 04:22:38.972818889 +0000 UTC 
m=+1654.309108615" lastFinishedPulling="2025-10-11 04:22:39.616268531 +0000 UTC m=+1654.952558247" observedRunningTime="2025-10-11 04:22:40.6367887 +0000 UTC m=+1655.973078386" watchObservedRunningTime="2025-10-11 04:22:51.055183671 +0000 UTC m=+1666.391473357" Oct 11 04:22:51 crc kubenswrapper[4798]: I1011 04:22:51.057865 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-px8b4"] Oct 11 04:22:51 crc kubenswrapper[4798]: I1011 04:22:51.067310 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-8vgc6"] Oct 11 04:22:51 crc kubenswrapper[4798]: I1011 04:22:51.079522 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-px8b4"] Oct 11 04:22:51 crc kubenswrapper[4798]: I1011 04:22:51.087873 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-8vgc6"] Oct 11 04:22:51 crc kubenswrapper[4798]: I1011 04:22:51.095306 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-f42tc"] Oct 11 04:22:51 crc kubenswrapper[4798]: I1011 04:22:51.102731 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-f42tc"] Oct 11 04:22:51 crc kubenswrapper[4798]: I1011 04:22:51.437654 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3173f67b-5d6d-4e62-89e0-1e97f7fdc299" path="/var/lib/kubelet/pods/3173f67b-5d6d-4e62-89e0-1e97f7fdc299/volumes" Oct 11 04:22:51 crc kubenswrapper[4798]: I1011 04:22:51.438578 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="605e3f63-9cb1-4dc6-8455-ae0a7af9b164" path="/var/lib/kubelet/pods/605e3f63-9cb1-4dc6-8455-ae0a7af9b164/volumes" Oct 11 04:22:51 crc kubenswrapper[4798]: I1011 04:22:51.439267 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4a180e5-98b6-4211-8e75-1107f4a3530b" path="/var/lib/kubelet/pods/f4a180e5-98b6-4211-8e75-1107f4a3530b/volumes" Oct 11 04:23:00 crc kubenswrapper[4798]: I1011 04:23:00.042495 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-7c3d-account-create-f9s8l"] Oct 11 04:23:00 crc kubenswrapper[4798]: I1011 04:23:00.053276 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-504f-account-create-2dqzs"] Oct 11 04:23:00 crc kubenswrapper[4798]: I1011 04:23:00.059246 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-504f-account-create-2dqzs"] Oct 11 04:23:00 crc kubenswrapper[4798]: I1011 04:23:00.065782 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-7c3d-account-create-f9s8l"] Oct 11 04:23:00 crc kubenswrapper[4798]: I1011 04:23:00.424683 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:23:00 crc kubenswrapper[4798]: E1011 04:23:00.425678 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:23:01 crc kubenswrapper[4798]: I1011 04:23:01.026356 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-0bfb-account-create-7nxnt"] Oct 11 04:23:01 crc kubenswrapper[4798]: I1011 
04:23:01.040325 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-0bfb-account-create-7nxnt"] Oct 11 04:23:01 crc kubenswrapper[4798]: I1011 04:23:01.436807 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ea903c7-d1f2-455c-80ca-e672df23712b" path="/var/lib/kubelet/pods/2ea903c7-d1f2-455c-80ca-e672df23712b/volumes" Oct 11 04:23:01 crc kubenswrapper[4798]: I1011 04:23:01.437806 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="603093d3-dcc2-4ee2-be29-de2505f5c238" path="/var/lib/kubelet/pods/603093d3-dcc2-4ee2-be29-de2505f5c238/volumes" Oct 11 04:23:01 crc kubenswrapper[4798]: I1011 04:23:01.438547 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="680c8968-661e-40dd-b095-190bab5e1fac" path="/var/lib/kubelet/pods/680c8968-661e-40dd-b095-190bab5e1fac/volumes" Oct 11 04:23:13 crc kubenswrapper[4798]: I1011 04:23:13.424891 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:23:13 crc kubenswrapper[4798]: E1011 04:23:13.426919 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:23:20 crc kubenswrapper[4798]: I1011 04:23:20.279758 4798 scope.go:117] "RemoveContainer" containerID="4bb45b48c7f376b270a80f3af76073b1e89305955e9f4c24ea09f34f9c84e69b" Oct 11 04:23:20 crc kubenswrapper[4798]: I1011 04:23:20.339732 4798 scope.go:117] "RemoveContainer" containerID="4560682c05da7184bb3fc5dedadb1f06d54929d6892f26dcdc2520647490abc5" Oct 11 04:23:20 crc kubenswrapper[4798]: I1011 04:23:20.377213 4798 scope.go:117] "RemoveContainer" containerID="af1db6e406e7a9fd9bc4f359dbf713d537270527d53e1b4c5ced64cc7e242abd" Oct 11 04:23:20 crc kubenswrapper[4798]: I1011 04:23:20.419196 4798 scope.go:117] "RemoveContainer" containerID="1de8a781698d423ab551bbcc210192ee891f6a2823c2e5ccd708cb795e666b0e" Oct 11 04:23:20 crc kubenswrapper[4798]: I1011 04:23:20.461209 4798 scope.go:117] "RemoveContainer" containerID="d4e2a2b908733935ff933bdbfadb57ba237ddcc88bbf8529a93d38e6dd027710" Oct 11 04:23:20 crc kubenswrapper[4798]: I1011 04:23:20.502273 4798 scope.go:117] "RemoveContainer" containerID="a353e9b02444c11ce479d86c7230c69f097d8eeac9fcf6abaec7ab52a3a871d6" Oct 11 04:23:26 crc kubenswrapper[4798]: I1011 04:23:26.038659 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-t4fxb"] Oct 11 04:23:26 crc kubenswrapper[4798]: I1011 04:23:26.046322 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-t4fxb"] Oct 11 04:23:26 crc kubenswrapper[4798]: I1011 04:23:26.423622 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:23:26 crc kubenswrapper[4798]: E1011 04:23:26.423922 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:23:27 crc kubenswrapper[4798]: I1011 04:23:27.435213 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9" path="/var/lib/kubelet/pods/b5ce0d5d-bf5d-4e53-b93b-6748bcd257c9/volumes" Oct 11 04:23:30 crc kubenswrapper[4798]: I1011 04:23:30.084849 4798 generic.go:334] "Generic (PLEG): container finished" podID="80751ff0-128a-4b49-8eb3-a3fb93afd239" containerID="caa9d29e42c40384d44151d80a7dcda6cc89eade70ff41e804b0fc0f7296030a" exitCode=0 Oct 11 04:23:30 crc kubenswrapper[4798]: I1011 04:23:30.084895 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" event={"ID":"80751ff0-128a-4b49-8eb3-a3fb93afd239","Type":"ContainerDied","Data":"caa9d29e42c40384d44151d80a7dcda6cc89eade70ff41e804b0fc0f7296030a"} Oct 11 04:23:31 crc kubenswrapper[4798]: I1011 04:23:31.466185 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" Oct 11 04:23:31 crc kubenswrapper[4798]: I1011 04:23:31.569934 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/80751ff0-128a-4b49-8eb3-a3fb93afd239-ssh-key\") pod \"80751ff0-128a-4b49-8eb3-a3fb93afd239\" (UID: \"80751ff0-128a-4b49-8eb3-a3fb93afd239\") " Oct 11 04:23:31 crc kubenswrapper[4798]: I1011 04:23:31.570097 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kqzx\" (UniqueName: \"kubernetes.io/projected/80751ff0-128a-4b49-8eb3-a3fb93afd239-kube-api-access-4kqzx\") pod \"80751ff0-128a-4b49-8eb3-a3fb93afd239\" (UID: \"80751ff0-128a-4b49-8eb3-a3fb93afd239\") " Oct 11 04:23:31 crc kubenswrapper[4798]: I1011 04:23:31.571754 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/80751ff0-128a-4b49-8eb3-a3fb93afd239-inventory\") pod \"80751ff0-128a-4b49-8eb3-a3fb93afd239\" (UID: \"80751ff0-128a-4b49-8eb3-a3fb93afd239\") " Oct 11 04:23:31 crc kubenswrapper[4798]: I1011 04:23:31.577215 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80751ff0-128a-4b49-8eb3-a3fb93afd239-kube-api-access-4kqzx" (OuterVolumeSpecName: "kube-api-access-4kqzx") pod "80751ff0-128a-4b49-8eb3-a3fb93afd239" (UID: "80751ff0-128a-4b49-8eb3-a3fb93afd239"). InnerVolumeSpecName "kube-api-access-4kqzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:23:31 crc kubenswrapper[4798]: I1011 04:23:31.606929 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80751ff0-128a-4b49-8eb3-a3fb93afd239-inventory" (OuterVolumeSpecName: "inventory") pod "80751ff0-128a-4b49-8eb3-a3fb93afd239" (UID: "80751ff0-128a-4b49-8eb3-a3fb93afd239"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:23:31 crc kubenswrapper[4798]: I1011 04:23:31.609618 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80751ff0-128a-4b49-8eb3-a3fb93afd239-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "80751ff0-128a-4b49-8eb3-a3fb93afd239" (UID: "80751ff0-128a-4b49-8eb3-a3fb93afd239"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:23:31 crc kubenswrapper[4798]: I1011 04:23:31.676921 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kqzx\" (UniqueName: \"kubernetes.io/projected/80751ff0-128a-4b49-8eb3-a3fb93afd239-kube-api-access-4kqzx\") on node \"crc\" DevicePath \"\"" Oct 11 04:23:31 crc kubenswrapper[4798]: I1011 04:23:31.677357 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/80751ff0-128a-4b49-8eb3-a3fb93afd239-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:23:31 crc kubenswrapper[4798]: I1011 04:23:31.677376 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/80751ff0-128a-4b49-8eb3-a3fb93afd239-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.099273 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" event={"ID":"80751ff0-128a-4b49-8eb3-a3fb93afd239","Type":"ContainerDied","Data":"7d911e7fa3bb5ea42c5b549615a700a5fb6891eee63acb33fa79c421e3c6c541"} Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.099324 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7d911e7fa3bb5ea42c5b549615a700a5fb6891eee63acb33fa79c421e3c6c541" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.099320 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.173564 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-jcscs"] Oct 11 04:23:32 crc kubenswrapper[4798]: E1011 04:23:32.173928 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80751ff0-128a-4b49-8eb3-a3fb93afd239" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.173947 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="80751ff0-128a-4b49-8eb3-a3fb93afd239" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.174119 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="80751ff0-128a-4b49-8eb3-a3fb93afd239" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.176014 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.178282 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.178303 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.178812 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.180090 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.182779 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-jcscs"] Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.189058 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjbhq\" (UniqueName: \"kubernetes.io/projected/cf5a942a-e817-42ee-a871-b840b9e4aaa0-kube-api-access-zjbhq\") pod \"ssh-known-hosts-edpm-deployment-jcscs\" (UID: \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\") " pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.189185 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/cf5a942a-e817-42ee-a871-b840b9e4aaa0-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-jcscs\" (UID: \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\") " pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.189216 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf5a942a-e817-42ee-a871-b840b9e4aaa0-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-jcscs\" (UID: \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\") " pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.291764 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/cf5a942a-e817-42ee-a871-b840b9e4aaa0-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-jcscs\" (UID: \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\") " pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.291881 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf5a942a-e817-42ee-a871-b840b9e4aaa0-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-jcscs\" (UID: \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\") " pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.292140 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjbhq\" (UniqueName: \"kubernetes.io/projected/cf5a942a-e817-42ee-a871-b840b9e4aaa0-kube-api-access-zjbhq\") pod \"ssh-known-hosts-edpm-deployment-jcscs\" (UID: \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\") " pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" Oct 11 04:23:32 crc 
kubenswrapper[4798]: I1011 04:23:32.296711 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf5a942a-e817-42ee-a871-b840b9e4aaa0-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-jcscs\" (UID: \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\") " pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.297256 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/cf5a942a-e817-42ee-a871-b840b9e4aaa0-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-jcscs\" (UID: \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\") " pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.312838 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjbhq\" (UniqueName: \"kubernetes.io/projected/cf5a942a-e817-42ee-a871-b840b9e4aaa0-kube-api-access-zjbhq\") pod \"ssh-known-hosts-edpm-deployment-jcscs\" (UID: \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\") " pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" Oct 11 04:23:32 crc kubenswrapper[4798]: I1011 04:23:32.493295 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" Oct 11 04:23:33 crc kubenswrapper[4798]: I1011 04:23:33.072167 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-jcscs"] Oct 11 04:23:33 crc kubenswrapper[4798]: I1011 04:23:33.114573 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" event={"ID":"cf5a942a-e817-42ee-a871-b840b9e4aaa0","Type":"ContainerStarted","Data":"7dda4192a063269bdbad8e7435831036c71f50a6385b4243af4dbbc0b5ceba4b"} Oct 11 04:23:34 crc kubenswrapper[4798]: I1011 04:23:34.150266 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" event={"ID":"cf5a942a-e817-42ee-a871-b840b9e4aaa0","Type":"ContainerStarted","Data":"3d1456356390a4fd7657806f5dcb13eabe8d0cbd1a59d84d676145cec492ddf4"} Oct 11 04:23:34 crc kubenswrapper[4798]: I1011 04:23:34.174000 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" podStartSLOduration=1.658222407 podStartE2EDuration="2.173972627s" podCreationTimestamp="2025-10-11 04:23:32 +0000 UTC" firstStartedPulling="2025-10-11 04:23:33.07821043 +0000 UTC m=+1708.414500116" lastFinishedPulling="2025-10-11 04:23:33.59396064 +0000 UTC m=+1708.930250336" observedRunningTime="2025-10-11 04:23:34.163471645 +0000 UTC m=+1709.499761331" watchObservedRunningTime="2025-10-11 04:23:34.173972627 +0000 UTC m=+1709.510262333" Oct 11 04:23:38 crc kubenswrapper[4798]: I1011 04:23:38.423274 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:23:38 crc kubenswrapper[4798]: E1011 04:23:38.424850 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:23:41 crc 
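The pod_startup_latency_tracker entry above carries its own arithmetic: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that same span minus the image-pull window (lastFinishedPulling minus firstStartedPulling), i.e. startup latency excluding pull time. A sketch reproducing those numbers from the logged timestamps, which use Go's default time format:

package main

import (
	"fmt"
	"time"
)

// E2E duration = watchObservedRunningTime - podCreationTimestamp; the SLO
// duration subtracts the image-pull window from it (an interpretation the
// timestamps above bear out to within clock-source rounding).
func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST" // Go's default Time.String() format
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}

	created := parse("2025-10-11 04:23:32 +0000 UTC")
	firstPull := parse("2025-10-11 04:23:33.07821043 +0000 UTC")
	lastPull := parse("2025-10-11 04:23:33.59396064 +0000 UTC")
	running := parse("2025-10-11 04:23:34.173972627 +0000 UTC")

	e2e := running.Sub(created)
	slo := e2e - lastPull.Sub(firstPull)
	fmt.Println("podStartE2EDuration:", e2e) // 2.173972627s, as logged
	fmt.Println("podStartSLOduration:", slo) // ~1.6582224s; the tracker's monotonic readings shift the last digits
}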
Oct 11 04:23:41 crc kubenswrapper[4798]: I1011 04:23:41.213686 4798 generic.go:334] "Generic (PLEG): container finished" podID="cf5a942a-e817-42ee-a871-b840b9e4aaa0" containerID="3d1456356390a4fd7657806f5dcb13eabe8d0cbd1a59d84d676145cec492ddf4" exitCode=0
Oct 11 04:23:41 crc kubenswrapper[4798]: I1011 04:23:41.213835 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" event={"ID":"cf5a942a-e817-42ee-a871-b840b9e4aaa0","Type":"ContainerDied","Data":"3d1456356390a4fd7657806f5dcb13eabe8d0cbd1a59d84d676145cec492ddf4"}
Oct 11 04:23:42 crc kubenswrapper[4798]: I1011 04:23:42.656511 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-jcscs"
Oct 11 04:23:42 crc kubenswrapper[4798]: I1011 04:23:42.782953 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/cf5a942a-e817-42ee-a871-b840b9e4aaa0-inventory-0\") pod \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\" (UID: \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\") "
Oct 11 04:23:42 crc kubenswrapper[4798]: I1011 04:23:42.783109 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjbhq\" (UniqueName: \"kubernetes.io/projected/cf5a942a-e817-42ee-a871-b840b9e4aaa0-kube-api-access-zjbhq\") pod \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\" (UID: \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\") "
Oct 11 04:23:42 crc kubenswrapper[4798]: I1011 04:23:42.783193 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf5a942a-e817-42ee-a871-b840b9e4aaa0-ssh-key-openstack-edpm-ipam\") pod \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\" (UID: \"cf5a942a-e817-42ee-a871-b840b9e4aaa0\") "
Oct 11 04:23:42 crc kubenswrapper[4798]: I1011 04:23:42.788350 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf5a942a-e817-42ee-a871-b840b9e4aaa0-kube-api-access-zjbhq" (OuterVolumeSpecName: "kube-api-access-zjbhq") pod "cf5a942a-e817-42ee-a871-b840b9e4aaa0" (UID: "cf5a942a-e817-42ee-a871-b840b9e4aaa0"). InnerVolumeSpecName "kube-api-access-zjbhq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:23:42 crc kubenswrapper[4798]: I1011 04:23:42.810928 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf5a942a-e817-42ee-a871-b840b9e4aaa0-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "cf5a942a-e817-42ee-a871-b840b9e4aaa0" (UID: "cf5a942a-e817-42ee-a871-b840b9e4aaa0"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:23:42 crc kubenswrapper[4798]: I1011 04:23:42.810970 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf5a942a-e817-42ee-a871-b840b9e4aaa0-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "cf5a942a-e817-42ee-a871-b840b9e4aaa0" (UID: "cf5a942a-e817-42ee-a871-b840b9e4aaa0"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam".
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:23:42 crc kubenswrapper[4798]: I1011 04:23:42.885365 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjbhq\" (UniqueName: \"kubernetes.io/projected/cf5a942a-e817-42ee-a871-b840b9e4aaa0-kube-api-access-zjbhq\") on node \"crc\" DevicePath \"\"" Oct 11 04:23:42 crc kubenswrapper[4798]: I1011 04:23:42.885408 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf5a942a-e817-42ee-a871-b840b9e4aaa0-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Oct 11 04:23:42 crc kubenswrapper[4798]: I1011 04:23:42.885421 4798 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/cf5a942a-e817-42ee-a871-b840b9e4aaa0-inventory-0\") on node \"crc\" DevicePath \"\"" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.235039 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" event={"ID":"cf5a942a-e817-42ee-a871-b840b9e4aaa0","Type":"ContainerDied","Data":"7dda4192a063269bdbad8e7435831036c71f50a6385b4243af4dbbc0b5ceba4b"} Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.235095 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7dda4192a063269bdbad8e7435831036c71f50a6385b4243af4dbbc0b5ceba4b" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.235168 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-jcscs" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.315620 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72"] Oct 11 04:23:43 crc kubenswrapper[4798]: E1011 04:23:43.316521 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf5a942a-e817-42ee-a871-b840b9e4aaa0" containerName="ssh-known-hosts-edpm-deployment" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.316551 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf5a942a-e817-42ee-a871-b840b9e4aaa0" containerName="ssh-known-hosts-edpm-deployment" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.316975 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf5a942a-e817-42ee-a871-b840b9e4aaa0" containerName="ssh-known-hosts-edpm-deployment" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.320609 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.326171 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.326889 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.329105 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.335170 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72"] Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.343426 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.393667 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eb105376-3b5c-4a06-97db-ded802e69b66-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-9dv72\" (UID: \"eb105376-3b5c-4a06-97db-ded802e69b66\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.393752 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwlkq\" (UniqueName: \"kubernetes.io/projected/eb105376-3b5c-4a06-97db-ded802e69b66-kube-api-access-fwlkq\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-9dv72\" (UID: \"eb105376-3b5c-4a06-97db-ded802e69b66\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.393796 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eb105376-3b5c-4a06-97db-ded802e69b66-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-9dv72\" (UID: \"eb105376-3b5c-4a06-97db-ded802e69b66\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.496234 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eb105376-3b5c-4a06-97db-ded802e69b66-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-9dv72\" (UID: \"eb105376-3b5c-4a06-97db-ded802e69b66\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.496308 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwlkq\" (UniqueName: \"kubernetes.io/projected/eb105376-3b5c-4a06-97db-ded802e69b66-kube-api-access-fwlkq\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-9dv72\" (UID: \"eb105376-3b5c-4a06-97db-ded802e69b66\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.496356 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eb105376-3b5c-4a06-97db-ded802e69b66-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-9dv72\" (UID: \"eb105376-3b5c-4a06-97db-ded802e69b66\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.500159 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eb105376-3b5c-4a06-97db-ded802e69b66-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-9dv72\" (UID: \"eb105376-3b5c-4a06-97db-ded802e69b66\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.501098 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eb105376-3b5c-4a06-97db-ded802e69b66-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-9dv72\" (UID: \"eb105376-3b5c-4a06-97db-ded802e69b66\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.517733 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwlkq\" (UniqueName: \"kubernetes.io/projected/eb105376-3b5c-4a06-97db-ded802e69b66-kube-api-access-fwlkq\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-9dv72\" (UID: \"eb105376-3b5c-4a06-97db-ded802e69b66\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" Oct 11 04:23:43 crc kubenswrapper[4798]: I1011 04:23:43.654088 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" Oct 11 04:23:44 crc kubenswrapper[4798]: I1011 04:23:44.032584 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-tqmm5"] Oct 11 04:23:44 crc kubenswrapper[4798]: I1011 04:23:44.040694 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-tqmm5"] Oct 11 04:23:44 crc kubenswrapper[4798]: I1011 04:23:44.185148 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72"] Oct 11 04:23:44 crc kubenswrapper[4798]: I1011 04:23:44.244017 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" event={"ID":"eb105376-3b5c-4a06-97db-ded802e69b66","Type":"ContainerStarted","Data":"c127e2304124a7bb1a024efe8fc50aa6fed9bf8421bf13495d8c5f59bb96e166"} Oct 11 04:23:45 crc kubenswrapper[4798]: I1011 04:23:45.043446 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vj9wj"] Oct 11 04:23:45 crc kubenswrapper[4798]: I1011 04:23:45.050793 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vj9wj"] Oct 11 04:23:45 crc kubenswrapper[4798]: I1011 04:23:45.255781 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" event={"ID":"eb105376-3b5c-4a06-97db-ded802e69b66","Type":"ContainerStarted","Data":"e8ca87499a6303c18f9a4940a14639daabe9581c876c120adb9853a3ddfcec42"} Oct 11 04:23:45 crc kubenswrapper[4798]: I1011 04:23:45.437352 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ab3945b-9225-4901-a3ed-57ea8dcaf456" path="/var/lib/kubelet/pods/2ab3945b-9225-4901-a3ed-57ea8dcaf456/volumes" Oct 11 04:23:45 crc kubenswrapper[4798]: I1011 04:23:45.437992 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2da346d6-33cf-4000-8cf9-f269b86bda5d" path="/var/lib/kubelet/pods/2da346d6-33cf-4000-8cf9-f269b86bda5d/volumes" Oct 11 
04:23:50 crc kubenswrapper[4798]: I1011 04:23:50.423902 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:23:50 crc kubenswrapper[4798]: E1011 04:23:50.424767 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:23:53 crc kubenswrapper[4798]: I1011 04:23:53.327700 4798 generic.go:334] "Generic (PLEG): container finished" podID="eb105376-3b5c-4a06-97db-ded802e69b66" containerID="e8ca87499a6303c18f9a4940a14639daabe9581c876c120adb9853a3ddfcec42" exitCode=0 Oct 11 04:23:53 crc kubenswrapper[4798]: I1011 04:23:53.327803 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" event={"ID":"eb105376-3b5c-4a06-97db-ded802e69b66","Type":"ContainerDied","Data":"e8ca87499a6303c18f9a4940a14639daabe9581c876c120adb9853a3ddfcec42"} Oct 11 04:23:54 crc kubenswrapper[4798]: I1011 04:23:54.883979 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.008524 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eb105376-3b5c-4a06-97db-ded802e69b66-ssh-key\") pod \"eb105376-3b5c-4a06-97db-ded802e69b66\" (UID: \"eb105376-3b5c-4a06-97db-ded802e69b66\") " Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.008573 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eb105376-3b5c-4a06-97db-ded802e69b66-inventory\") pod \"eb105376-3b5c-4a06-97db-ded802e69b66\" (UID: \"eb105376-3b5c-4a06-97db-ded802e69b66\") " Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.008690 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fwlkq\" (UniqueName: \"kubernetes.io/projected/eb105376-3b5c-4a06-97db-ded802e69b66-kube-api-access-fwlkq\") pod \"eb105376-3b5c-4a06-97db-ded802e69b66\" (UID: \"eb105376-3b5c-4a06-97db-ded802e69b66\") " Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.015194 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb105376-3b5c-4a06-97db-ded802e69b66-kube-api-access-fwlkq" (OuterVolumeSpecName: "kube-api-access-fwlkq") pod "eb105376-3b5c-4a06-97db-ded802e69b66" (UID: "eb105376-3b5c-4a06-97db-ded802e69b66"). InnerVolumeSpecName "kube-api-access-fwlkq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.037035 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb105376-3b5c-4a06-97db-ded802e69b66-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "eb105376-3b5c-4a06-97db-ded802e69b66" (UID: "eb105376-3b5c-4a06-97db-ded802e69b66"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.046825 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb105376-3b5c-4a06-97db-ded802e69b66-inventory" (OuterVolumeSpecName: "inventory") pod "eb105376-3b5c-4a06-97db-ded802e69b66" (UID: "eb105376-3b5c-4a06-97db-ded802e69b66"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.111918 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eb105376-3b5c-4a06-97db-ded802e69b66-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.111957 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eb105376-3b5c-4a06-97db-ded802e69b66-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.111967 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fwlkq\" (UniqueName: \"kubernetes.io/projected/eb105376-3b5c-4a06-97db-ded802e69b66-kube-api-access-fwlkq\") on node \"crc\" DevicePath \"\"" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.347154 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" event={"ID":"eb105376-3b5c-4a06-97db-ded802e69b66","Type":"ContainerDied","Data":"c127e2304124a7bb1a024efe8fc50aa6fed9bf8421bf13495d8c5f59bb96e166"} Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.347485 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c127e2304124a7bb1a024efe8fc50aa6fed9bf8421bf13495d8c5f59bb96e166" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.347221 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.452427 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp"] Oct 11 04:23:55 crc kubenswrapper[4798]: E1011 04:23:55.452899 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb105376-3b5c-4a06-97db-ded802e69b66" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.452926 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb105376-3b5c-4a06-97db-ded802e69b66" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.453168 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb105376-3b5c-4a06-97db-ded802e69b66" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.453940 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.455743 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.455891 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.456013 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.456958 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.462384 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp"] Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.621369 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljwg7\" (UniqueName: \"kubernetes.io/projected/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-kube-api-access-ljwg7\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp\" (UID: \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.621894 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp\" (UID: \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.622276 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp\" (UID: \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.723703 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp\" (UID: \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.723777 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp\" (UID: \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.723874 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljwg7\" (UniqueName: \"kubernetes.io/projected/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-kube-api-access-ljwg7\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp\" (UID: 
\"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.727964 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp\" (UID: \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.733114 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp\" (UID: \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.740711 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljwg7\" (UniqueName: \"kubernetes.io/projected/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-kube-api-access-ljwg7\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp\" (UID: \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" Oct 11 04:23:55 crc kubenswrapper[4798]: I1011 04:23:55.777479 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" Oct 11 04:23:56 crc kubenswrapper[4798]: I1011 04:23:56.111183 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp"] Oct 11 04:23:56 crc kubenswrapper[4798]: I1011 04:23:56.355786 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" event={"ID":"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0","Type":"ContainerStarted","Data":"ffdd340ab717fd3820202c0f8c7e1686dad9d3c29ba9d815105a6e81a2ae77bb"} Oct 11 04:23:57 crc kubenswrapper[4798]: I1011 04:23:57.369528 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" event={"ID":"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0","Type":"ContainerStarted","Data":"d5e8117ebe8ddd8470bde9b0f4953bc83c06f88fe182f5343c6a7f41fe98133d"} Oct 11 04:23:57 crc kubenswrapper[4798]: I1011 04:23:57.394642 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" podStartSLOduration=1.9756026979999999 podStartE2EDuration="2.394622838s" podCreationTimestamp="2025-10-11 04:23:55 +0000 UTC" firstStartedPulling="2025-10-11 04:23:56.117869888 +0000 UTC m=+1731.454159574" lastFinishedPulling="2025-10-11 04:23:56.536890028 +0000 UTC m=+1731.873179714" observedRunningTime="2025-10-11 04:23:57.394539607 +0000 UTC m=+1732.730829333" watchObservedRunningTime="2025-10-11 04:23:57.394622838 +0000 UTC m=+1732.730912524" Oct 11 04:24:04 crc kubenswrapper[4798]: I1011 04:24:04.423527 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:24:04 crc kubenswrapper[4798]: E1011 04:24:04.424224 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:24:06 crc kubenswrapper[4798]: I1011 04:24:06.452153 4798 generic.go:334] "Generic (PLEG): container finished" podID="ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0" containerID="d5e8117ebe8ddd8470bde9b0f4953bc83c06f88fe182f5343c6a7f41fe98133d" exitCode=0 Oct 11 04:24:06 crc kubenswrapper[4798]: I1011 04:24:06.452253 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" event={"ID":"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0","Type":"ContainerDied","Data":"d5e8117ebe8ddd8470bde9b0f4953bc83c06f88fe182f5343c6a7f41fe98133d"} Oct 11 04:24:07 crc kubenswrapper[4798]: I1011 04:24:07.881218 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" Oct 11 04:24:08 crc kubenswrapper[4798]: I1011 04:24:08.049637 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-ssh-key\") pod \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\" (UID: \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\") " Oct 11 04:24:08 crc kubenswrapper[4798]: I1011 04:24:08.049764 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-inventory\") pod \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\" (UID: \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\") " Oct 11 04:24:08 crc kubenswrapper[4798]: I1011 04:24:08.049894 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljwg7\" (UniqueName: \"kubernetes.io/projected/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-kube-api-access-ljwg7\") pod \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\" (UID: \"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0\") " Oct 11 04:24:08 crc kubenswrapper[4798]: I1011 04:24:08.056111 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-kube-api-access-ljwg7" (OuterVolumeSpecName: "kube-api-access-ljwg7") pod "ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0" (UID: "ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0"). InnerVolumeSpecName "kube-api-access-ljwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:24:08 crc kubenswrapper[4798]: I1011 04:24:08.076847 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-inventory" (OuterVolumeSpecName: "inventory") pod "ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0" (UID: "ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:24:08 crc kubenswrapper[4798]: I1011 04:24:08.098086 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0" (UID: "ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:24:08 crc kubenswrapper[4798]: I1011 04:24:08.152272 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:24:08 crc kubenswrapper[4798]: I1011 04:24:08.152313 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:24:08 crc kubenswrapper[4798]: I1011 04:24:08.152322 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljwg7\" (UniqueName: \"kubernetes.io/projected/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0-kube-api-access-ljwg7\") on node \"crc\" DevicePath \"\"" Oct 11 04:24:08 crc kubenswrapper[4798]: I1011 04:24:08.494629 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" event={"ID":"ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0","Type":"ContainerDied","Data":"ffdd340ab717fd3820202c0f8c7e1686dad9d3c29ba9d815105a6e81a2ae77bb"} Oct 11 04:24:08 crc kubenswrapper[4798]: I1011 04:24:08.494697 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ffdd340ab717fd3820202c0f8c7e1686dad9d3c29ba9d815105a6e81a2ae77bb" Oct 11 04:24:08 crc kubenswrapper[4798]: I1011 04:24:08.494822 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp" Oct 11 04:24:16 crc kubenswrapper[4798]: I1011 04:24:16.423514 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:24:16 crc kubenswrapper[4798]: E1011 04:24:16.424691 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:24:20 crc kubenswrapper[4798]: I1011 04:24:20.634696 4798 scope.go:117] "RemoveContainer" containerID="409eff34496e683abdf66df063cbb478d44a5e51a9ba3e4e32ab1559d476887a" Oct 11 04:24:20 crc kubenswrapper[4798]: I1011 04:24:20.699144 4798 scope.go:117] "RemoveContainer" containerID="14b5a6837034b5a81eac5c1851fe2b0fed4bb2911bc051c6ae5cf941236cbc05" Oct 11 04:24:20 crc kubenswrapper[4798]: I1011 04:24:20.736259 4798 scope.go:117] "RemoveContainer" containerID="0ab241daf52edb119b3355fdc6d0a8cc66a5baedfe949bad0a95c5b1529d7866" Oct 11 04:24:29 crc kubenswrapper[4798]: I1011 04:24:29.042490 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-rnnjx"] Oct 11 04:24:29 crc kubenswrapper[4798]: I1011 04:24:29.049802 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-rnnjx"] Oct 11 04:24:29 crc kubenswrapper[4798]: I1011 04:24:29.424611 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:24:29 crc kubenswrapper[4798]: E1011 04:24:29.425108 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:24:29 crc kubenswrapper[4798]: I1011 04:24:29.441353 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37feee20-a933-440b-8b62-59566bb9f440" path="/var/lib/kubelet/pods/37feee20-a933-440b-8b62-59566bb9f440/volumes" Oct 11 04:24:41 crc kubenswrapper[4798]: I1011 04:24:41.424340 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:24:41 crc kubenswrapper[4798]: E1011 04:24:41.426669 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:24:56 crc kubenswrapper[4798]: I1011 04:24:56.424025 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:24:56 crc kubenswrapper[4798]: E1011 04:24:56.425102 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:25:10 crc kubenswrapper[4798]: I1011 04:25:10.424968 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:25:11 crc kubenswrapper[4798]: I1011 04:25:11.144547 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"79815571603bad34e91dc55f6905e3697ce42c89636d6c2b96147d668afa53e5"} Oct 11 04:25:20 crc kubenswrapper[4798]: I1011 04:25:20.841905 4798 scope.go:117] "RemoveContainer" containerID="b8f9b12cc44036cadbd2c018e907b8a1f07c337830cbf32f0e0c4d991c918a41" Oct 11 04:27:27 crc kubenswrapper[4798]: I1011 04:27:27.138694 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:27:27 crc kubenswrapper[4798]: I1011 04:27:27.139570 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:27:57 crc kubenswrapper[4798]: I1011 04:27:57.139263 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:27:57 crc kubenswrapper[4798]: I1011 04:27:57.139933 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:28:27 crc kubenswrapper[4798]: I1011 04:28:27.138141 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:28:27 crc kubenswrapper[4798]: I1011 04:28:27.139545 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:28:27 crc kubenswrapper[4798]: I1011 04:28:27.139649 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 04:28:27 crc kubenswrapper[4798]: I1011 04:28:27.140372 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"79815571603bad34e91dc55f6905e3697ce42c89636d6c2b96147d668afa53e5"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 04:28:27 crc kubenswrapper[4798]: I1011 04:28:27.140501 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://79815571603bad34e91dc55f6905e3697ce42c89636d6c2b96147d668afa53e5" gracePeriod=600 Oct 11 04:28:28 crc kubenswrapper[4798]: I1011 04:28:28.122635 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerID="79815571603bad34e91dc55f6905e3697ce42c89636d6c2b96147d668afa53e5" exitCode=0 Oct 11 04:28:28 crc kubenswrapper[4798]: I1011 04:28:28.122745 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"79815571603bad34e91dc55f6905e3697ce42c89636d6c2b96147d668afa53e5"} Oct 11 04:28:28 crc kubenswrapper[4798]: I1011 04:28:28.123601 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c"} Oct 11 04:28:28 crc kubenswrapper[4798]: I1011 04:28:28.123632 4798 scope.go:117] "RemoveContainer" containerID="2e8e169485a90028020fcd80b69a1027ed01adf533cd6bd26b4fff5a221bbbae" Oct 11 04:28:34 crc kubenswrapper[4798]: E1011 04:28:34.539851 4798 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.846671 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.861464 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.867978 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7kmk2"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.875085 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-7rd8r"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.882103 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.888525 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.894138 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-jcscs"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.899722 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.907574 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bth9j"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.912514 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-jcscs"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.918371 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dq486"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.925165 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-kh7b7"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.931358 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.937542 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.943781 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.949621 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.955131 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.960333 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cd282"]
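The burst of SyncLoop DELETE/REMOVE pairs above is the API server garbage-collecting the finished one-shot EDPM job pods. Tracing any one pod through a log like this means pulling its PLEG lifecycle events out of very long lines; a small helper sketch in Go, with the regexp keyed to the klog format shown here:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Reads a kubelet log on stdin and prints each ContainerStarted/ContainerDied
// PLEG event with its pod and (truncated) container ID.
// Usage: go run extract.go < kubelet.log
func main() {
	re := regexp.MustCompile(`pod="([^"]+)" event=\{"ID":"([^"]+)","Type":"(ContainerStarted|ContainerDied)","Data":"([^"]+)"\}`)
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // these log lines can be very long
	for sc.Scan() {
		if m := re.FindStringSubmatch(sc.Text()); m != nil {
			fmt.Printf("%-16s pod=%s id=%.12s\n", m[3], m[1], m[4])
		}
	}
	if err := sc.Err(); err != nil {
		fmt.Fprintln(os.Stderr, "scan:", err)
	}
}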
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.965241 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-l5lmp"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.970032 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-9dv72"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.974752 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-g57xb"]
Oct 11 04:28:45 crc kubenswrapper[4798]: I1011 04:28:45.979538 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-79zk4"]
Oct 11 04:28:47 crc kubenswrapper[4798]: I1011 04:28:47.438561 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="326c1bc8-1869-4ca2-8e8b-10356a3ca498" path="/var/lib/kubelet/pods/326c1bc8-1869-4ca2-8e8b-10356a3ca498/volumes"
Oct 11 04:28:47 crc kubenswrapper[4798]: I1011 04:28:47.440338 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80751ff0-128a-4b49-8eb3-a3fb93afd239" path="/var/lib/kubelet/pods/80751ff0-128a-4b49-8eb3-a3fb93afd239/volumes"
Oct 11 04:28:47 crc kubenswrapper[4798]: I1011 04:28:47.441238 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8575ef22-7f1e-45b4-aeee-8ece74bb904f" path="/var/lib/kubelet/pods/8575ef22-7f1e-45b4-aeee-8ece74bb904f/volumes"
Oct 11 04:28:47 crc kubenswrapper[4798]: I1011 04:28:47.442273 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9342f970-0bdf-41b9-aa43-763845d72472" path="/var/lib/kubelet/pods/9342f970-0bdf-41b9-aa43-763845d72472/volumes"
Oct 11 04:28:47 crc kubenswrapper[4798]: I1011 04:28:47.443934 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0453dd0-3348-43e2-8489-681c44e089a1" path="/var/lib/kubelet/pods/a0453dd0-3348-43e2-8489-681c44e089a1/volumes"
Oct 11 04:28:47 crc kubenswrapper[4798]: I1011 04:28:47.444867 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a740bd42-4c8c-42a1-8b39-50e2f9389b40" path="/var/lib/kubelet/pods/a740bd42-4c8c-42a1-8b39-50e2f9389b40/volumes"
Oct 11 04:28:47 crc kubenswrapper[4798]: I1011 04:28:47.445806 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0f1aed4-3351-4dc5-8642-e580307cdb53" path="/var/lib/kubelet/pods/b0f1aed4-3351-4dc5-8642-e580307cdb53/volumes"
Oct 11 04:28:47 crc kubenswrapper[4798]: I1011 04:28:47.447704 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd3df659-3786-42ad-8ebb-00b3a6102640" path="/var/lib/kubelet/pods/bd3df659-3786-42ad-8ebb-00b3a6102640/volumes"
Oct 11 04:28:47 crc kubenswrapper[4798]: I1011 04:28:47.448614 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf5a942a-e817-42ee-a871-b840b9e4aaa0" path="/var/lib/kubelet/pods/cf5a942a-e817-42ee-a871-b840b9e4aaa0/volumes"
Oct 11 04:28:47 crc kubenswrapper[4798]: I1011 04:28:47.449597 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb105376-3b5c-4a06-97db-ded802e69b66" path="/var/lib/kubelet/pods/eb105376-3b5c-4a06-97db-ded802e69b66/volumes"
Oct 11 04:28:47 crc kubenswrapper[4798]: I1011 04:28:47.451325 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0" path="/var/lib/kubelet/pods/ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0/volumes"
Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.647032 4798 kubelet.go:2421] "SyncLoop ADD"
source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr"] Oct 11 04:28:51 crc kubenswrapper[4798]: E1011 04:28:51.647669 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.647683 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.647856 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebd6e80f-3d02-4d21-a9a9-ff2838ca32d0" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.648408 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.650948 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.651007 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.651540 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.652773 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.652952 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.683876 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr"] Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.718383 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.718529 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwdwp\" (UniqueName: \"kubernetes.io/projected/471b7b25-2875-473e-bb6e-0509b527b7d3-kube-api-access-bwdwp\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.718593 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.718658 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.718692 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.820008 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.820123 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwdwp\" (UniqueName: \"kubernetes.io/projected/471b7b25-2875-473e-bb6e-0509b527b7d3-kube-api-access-bwdwp\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.820188 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.820254 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.820286 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.828576 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.828618 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" 
(UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.829386 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.830103 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:51 crc kubenswrapper[4798]: I1011 04:28:51.839672 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwdwp\" (UniqueName: \"kubernetes.io/projected/471b7b25-2875-473e-bb6e-0509b527b7d3-kube-api-access-bwdwp\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:52 crc kubenswrapper[4798]: I1011 04:28:52.007515 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:28:52 crc kubenswrapper[4798]: I1011 04:28:52.503557 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr"] Oct 11 04:28:52 crc kubenswrapper[4798]: I1011 04:28:52.514677 4798 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 04:28:53 crc kubenswrapper[4798]: I1011 04:28:53.352154 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" event={"ID":"471b7b25-2875-473e-bb6e-0509b527b7d3","Type":"ContainerStarted","Data":"4211c59138c9713278cdc0a99f5155d679a0c6fa9b4abc9b912d1b2b82e8dd3c"} Oct 11 04:28:54 crc kubenswrapper[4798]: I1011 04:28:54.363027 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" event={"ID":"471b7b25-2875-473e-bb6e-0509b527b7d3","Type":"ContainerStarted","Data":"a806f4e094c6340c145863295d1838917761b6143eef2b7ff83079c9838cffb8"} Oct 11 04:28:54 crc kubenswrapper[4798]: I1011 04:28:54.383998 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" podStartSLOduration=2.679662565 podStartE2EDuration="3.383843387s" podCreationTimestamp="2025-10-11 04:28:51 +0000 UTC" firstStartedPulling="2025-10-11 04:28:52.513939212 +0000 UTC m=+2027.850228938" lastFinishedPulling="2025-10-11 04:28:53.218120064 +0000 UTC m=+2028.554409760" observedRunningTime="2025-10-11 04:28:54.38010915 +0000 UTC m=+2029.716398846" watchObservedRunningTime="2025-10-11 04:28:54.383843387 +0000 UTC m=+2029.720133083" Oct 11 04:29:05 crc kubenswrapper[4798]: I1011 04:29:05.487690 4798 generic.go:334] "Generic (PLEG): container finished" 
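
The pod_startup_latency_tracker.go:104 entry above carries enough fields to re-derive its own numbers: podStartSLOduration is podStartE2EDuration minus the image-pull window (lastFinishedPulling - firstStartedPulling, read off the monotonic m=+ offsets). For repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr that is 3.383843387 - (2028.554409760 - 2027.850228938) = 2.679662565, exactly the SLO duration logged, and the same identity holds for the other startup entries in this stretch of the log. A short check with those numbers (the variable names are mine):

    package main

    import "fmt"

    func main() {
        // Monotonic offsets (seconds) copied from the repo-setup entry above.
        firstStartedPulling := 2027.850228938
        lastFinishedPulling := 2028.554409760
        podStartE2E := 3.383843387 // observedRunningTime - podCreationTimestamp

        pullWindow := lastFinishedPulling - firstStartedPulling
        slo := podStartE2E - pullWindow
        fmt.Printf("pull window %.9fs, SLO duration %.9fs\n", pullWindow, slo)
        // Prints an SLO duration of 2.679662565s (up to float rounding),
        // matching podStartSLOduration in the log.
    }
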
podID="471b7b25-2875-473e-bb6e-0509b527b7d3" containerID="a806f4e094c6340c145863295d1838917761b6143eef2b7ff83079c9838cffb8" exitCode=0 Oct 11 04:29:05 crc kubenswrapper[4798]: I1011 04:29:05.487775 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" event={"ID":"471b7b25-2875-473e-bb6e-0509b527b7d3","Type":"ContainerDied","Data":"a806f4e094c6340c145863295d1838917761b6143eef2b7ff83079c9838cffb8"} Oct 11 04:29:06 crc kubenswrapper[4798]: I1011 04:29:06.887361 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.010330 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-ssh-key\") pod \"471b7b25-2875-473e-bb6e-0509b527b7d3\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.010382 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwdwp\" (UniqueName: \"kubernetes.io/projected/471b7b25-2875-473e-bb6e-0509b527b7d3-kube-api-access-bwdwp\") pod \"471b7b25-2875-473e-bb6e-0509b527b7d3\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.010456 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-repo-setup-combined-ca-bundle\") pod \"471b7b25-2875-473e-bb6e-0509b527b7d3\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.010550 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-inventory\") pod \"471b7b25-2875-473e-bb6e-0509b527b7d3\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.010652 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-ceph\") pod \"471b7b25-2875-473e-bb6e-0509b527b7d3\" (UID: \"471b7b25-2875-473e-bb6e-0509b527b7d3\") " Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.015972 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/471b7b25-2875-473e-bb6e-0509b527b7d3-kube-api-access-bwdwp" (OuterVolumeSpecName: "kube-api-access-bwdwp") pod "471b7b25-2875-473e-bb6e-0509b527b7d3" (UID: "471b7b25-2875-473e-bb6e-0509b527b7d3"). InnerVolumeSpecName "kube-api-access-bwdwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.016258 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "471b7b25-2875-473e-bb6e-0509b527b7d3" (UID: "471b7b25-2875-473e-bb6e-0509b527b7d3"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.017657 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-ceph" (OuterVolumeSpecName: "ceph") pod "471b7b25-2875-473e-bb6e-0509b527b7d3" (UID: "471b7b25-2875-473e-bb6e-0509b527b7d3"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.037832 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "471b7b25-2875-473e-bb6e-0509b527b7d3" (UID: "471b7b25-2875-473e-bb6e-0509b527b7d3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.043428 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-inventory" (OuterVolumeSpecName: "inventory") pod "471b7b25-2875-473e-bb6e-0509b527b7d3" (UID: "471b7b25-2875-473e-bb6e-0509b527b7d3"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.113773 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.113807 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.113820 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwdwp\" (UniqueName: \"kubernetes.io/projected/471b7b25-2875-473e-bb6e-0509b527b7d3-kube-api-access-bwdwp\") on node \"crc\" DevicePath \"\"" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.113832 4798 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.113843 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/471b7b25-2875-473e-bb6e-0509b527b7d3-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.506198 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" event={"ID":"471b7b25-2875-473e-bb6e-0509b527b7d3","Type":"ContainerDied","Data":"4211c59138c9713278cdc0a99f5155d679a0c6fa9b4abc9b912d1b2b82e8dd3c"} Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.506229 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.506246 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4211c59138c9713278cdc0a99f5155d679a0c6fa9b4abc9b912d1b2b82e8dd3c" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.580197 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf"] Oct 11 04:29:07 crc kubenswrapper[4798]: E1011 04:29:07.580545 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="471b7b25-2875-473e-bb6e-0509b527b7d3" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.580562 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="471b7b25-2875-473e-bb6e-0509b527b7d3" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.580720 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="471b7b25-2875-473e-bb6e-0509b527b7d3" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.581275 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.583566 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.585234 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.585255 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.585308 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.586461 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.630259 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf"] Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.631093 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.631226 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.631273 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjgz4\" (UniqueName: 
\"kubernetes.io/projected/6c616614-d70b-48a1-864f-df99b3bb33f3-kube-api-access-fjgz4\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.631349 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.631438 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.733491 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.733552 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjgz4\" (UniqueName: \"kubernetes.io/projected/6c616614-d70b-48a1-864f-df99b3bb33f3-kube-api-access-fjgz4\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.733630 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.733683 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.733886 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.738266 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.738483 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.738513 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.739150 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.752114 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjgz4\" (UniqueName: \"kubernetes.io/projected/6c616614-d70b-48a1-864f-df99b3bb33f3-kube-api-access-fjgz4\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-msblf\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:07 crc kubenswrapper[4798]: I1011 04:29:07.907904 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:29:08 crc kubenswrapper[4798]: I1011 04:29:08.436779 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf"] Oct 11 04:29:08 crc kubenswrapper[4798]: I1011 04:29:08.533645 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" event={"ID":"6c616614-d70b-48a1-864f-df99b3bb33f3","Type":"ContainerStarted","Data":"646a94d322ae4eb7d4ab3b63bc5fed88dd6e82b4d726d619b1ba2ab6cd8ac602"} Oct 11 04:29:09 crc kubenswrapper[4798]: I1011 04:29:09.546762 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" event={"ID":"6c616614-d70b-48a1-864f-df99b3bb33f3","Type":"ContainerStarted","Data":"2a50336ab7851f3b50d10736db8d64a8a4130f1ad25db424e087799826aa8fca"} Oct 11 04:29:09 crc kubenswrapper[4798]: I1011 04:29:09.574974 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" podStartSLOduration=2.15496656 podStartE2EDuration="2.574949191s" podCreationTimestamp="2025-10-11 04:29:07 +0000 UTC" firstStartedPulling="2025-10-11 04:29:08.446733492 +0000 UTC m=+2043.783023178" lastFinishedPulling="2025-10-11 04:29:08.866716113 +0000 UTC m=+2044.203005809" observedRunningTime="2025-10-11 04:29:09.565479699 +0000 UTC m=+2044.901769425" watchObservedRunningTime="2025-10-11 04:29:09.574949191 +0000 UTC m=+2044.911238917" Oct 11 04:29:20 crc kubenswrapper[4798]: I1011 04:29:20.973574 4798 scope.go:117] "RemoveContainer" containerID="a1a6b43eace6f5289328a7e4d9afa14af45b217f64f08807ffa9d0292b95ecf0" Oct 11 04:29:21 crc kubenswrapper[4798]: I1011 04:29:21.051857 4798 scope.go:117] "RemoveContainer" containerID="40d1cb237ac5be84bd9bbc89bc64afbb7c8b554afa9505468407a9a2f73465d2" Oct 11 04:29:21 crc kubenswrapper[4798]: I1011 04:29:21.120948 4798 scope.go:117] "RemoveContainer" containerID="7188573e76de3d9f65011bd8f18f171df4c220831714fa9bc31d8f4d9d46a525" Oct 11 04:29:21 crc kubenswrapper[4798]: I1011 04:29:21.161728 4798 scope.go:117] "RemoveContainer" containerID="93817d4d2ebb874672fa2a7ba370655f4907b48c493dd836a8db7a2b00160f52" Oct 11 04:29:21 crc kubenswrapper[4798]: I1011 04:29:21.189348 4798 scope.go:117] "RemoveContainer" containerID="f207351effe69a438689177ef964cc494a99b6d0c1e4a2fca9d0672064364f93" Oct 11 04:29:21 crc kubenswrapper[4798]: I1011 04:29:21.230163 4798 scope.go:117] "RemoveContainer" containerID="caa9d29e42c40384d44151d80a7dcda6cc89eade70ff41e804b0fc0f7296030a" Oct 11 04:29:21 crc kubenswrapper[4798]: I1011 04:29:21.278190 4798 scope.go:117] "RemoveContainer" containerID="9e4450dc66f6c28607d124753046d846ce33c520e4b6c6ea4985f9ad1cebc521" Oct 11 04:29:21 crc kubenswrapper[4798]: I1011 04:29:21.311556 4798 scope.go:117] "RemoveContainer" containerID="961bbec559ae3d181a04931cf29dfa73c41debf56553105153d2a5f2347b8b7b" Oct 11 04:29:40 crc kubenswrapper[4798]: I1011 04:29:40.139184 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z8kc4"] Oct 11 04:29:40 crc kubenswrapper[4798]: I1011 04:29:40.148479 4798 util.go:30] "No sandbox for pod can be found. 
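
The scope.go:117 "RemoveContainer" burst at 04:29:20-21 above is the kubelet's container garbage collector deleting dead containers, presumably those left behind by the job pods removed at 04:28:45. Tying container IDs like these back to their PLEG ContainerStarted/ContainerDied events elsewhere in the capture reconstructs each container's lifecycle; a rough timeline builder under the same assumptions as the earlier sketch (capture saved as kubelet.log, message strings exactly as logged; event ordering within one wrapped physical line is approximate):

    package main

    import (
        "bufio"
        "fmt"
        "log"
        "os"
        "regexp"
    )

    func main() {
        events := map[string][]string{} // container ID -> observed event names
        patterns := map[string]*regexp.Regexp{
            // PLEG events carry the ID in the Data field of the event literal.
            "started": regexp.MustCompile(`"Type":"ContainerStarted","Data":"([0-9a-f]{64})"`),
            "died":    regexp.MustCompile(`"Type":"ContainerDied","Data":"([0-9a-f]{64})"`),
            // Container GC, as in the scope.go:117 burst above.
            "removed": regexp.MustCompile(`"RemoveContainer" containerID="([0-9a-f]{64})"`),
        }
        f, err := os.Open("kubelet.log")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        sc := bufio.NewScanner(f)
        sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
        for sc.Scan() {
            for name, re := range patterns {
                for _, m := range re.FindAllStringSubmatch(sc.Text(), -1) {
                    events[m[1]] = append(events[m[1]], name)
                }
            }
        }
        if err := sc.Err(); err != nil {
            log.Fatal(err)
        }
        for id, seq := range events {
            fmt.Printf("%s: %v\n", id[:12], seq)
        }
    }
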
Need to start a new one" pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:40 crc kubenswrapper[4798]: I1011 04:29:40.186349 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z8kc4"] Oct 11 04:29:40 crc kubenswrapper[4798]: I1011 04:29:40.277840 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929d0839-6c1e-4da5-b961-660dd13670d3-utilities\") pod \"certified-operators-z8kc4\" (UID: \"929d0839-6c1e-4da5-b961-660dd13670d3\") " pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:40 crc kubenswrapper[4798]: I1011 04:29:40.277940 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929d0839-6c1e-4da5-b961-660dd13670d3-catalog-content\") pod \"certified-operators-z8kc4\" (UID: \"929d0839-6c1e-4da5-b961-660dd13670d3\") " pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:40 crc kubenswrapper[4798]: I1011 04:29:40.278011 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmrv9\" (UniqueName: \"kubernetes.io/projected/929d0839-6c1e-4da5-b961-660dd13670d3-kube-api-access-tmrv9\") pod \"certified-operators-z8kc4\" (UID: \"929d0839-6c1e-4da5-b961-660dd13670d3\") " pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:40 crc kubenswrapper[4798]: I1011 04:29:40.379692 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929d0839-6c1e-4da5-b961-660dd13670d3-utilities\") pod \"certified-operators-z8kc4\" (UID: \"929d0839-6c1e-4da5-b961-660dd13670d3\") " pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:40 crc kubenswrapper[4798]: I1011 04:29:40.379777 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929d0839-6c1e-4da5-b961-660dd13670d3-catalog-content\") pod \"certified-operators-z8kc4\" (UID: \"929d0839-6c1e-4da5-b961-660dd13670d3\") " pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:40 crc kubenswrapper[4798]: I1011 04:29:40.379833 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmrv9\" (UniqueName: \"kubernetes.io/projected/929d0839-6c1e-4da5-b961-660dd13670d3-kube-api-access-tmrv9\") pod \"certified-operators-z8kc4\" (UID: \"929d0839-6c1e-4da5-b961-660dd13670d3\") " pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:40 crc kubenswrapper[4798]: I1011 04:29:40.380359 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929d0839-6c1e-4da5-b961-660dd13670d3-utilities\") pod \"certified-operators-z8kc4\" (UID: \"929d0839-6c1e-4da5-b961-660dd13670d3\") " pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:40 crc kubenswrapper[4798]: I1011 04:29:40.380383 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929d0839-6c1e-4da5-b961-660dd13670d3-catalog-content\") pod \"certified-operators-z8kc4\" (UID: \"929d0839-6c1e-4da5-b961-660dd13670d3\") " pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:40 crc kubenswrapper[4798]: I1011 04:29:40.403514 4798 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tmrv9\" (UniqueName: \"kubernetes.io/projected/929d0839-6c1e-4da5-b961-660dd13670d3-kube-api-access-tmrv9\") pod \"certified-operators-z8kc4\" (UID: \"929d0839-6c1e-4da5-b961-660dd13670d3\") " pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:40 crc kubenswrapper[4798]: I1011 04:29:40.471827 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:41 crc kubenswrapper[4798]: I1011 04:29:41.008995 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z8kc4"] Oct 11 04:29:41 crc kubenswrapper[4798]: W1011 04:29:41.009956 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod929d0839_6c1e_4da5_b961_660dd13670d3.slice/crio-c3bfc0fbaa0db734810c4ba574b3bb13e9b11525fff11cf2c68eeea02318e646 WatchSource:0}: Error finding container c3bfc0fbaa0db734810c4ba574b3bb13e9b11525fff11cf2c68eeea02318e646: Status 404 returned error can't find the container with id c3bfc0fbaa0db734810c4ba574b3bb13e9b11525fff11cf2c68eeea02318e646 Oct 11 04:29:41 crc kubenswrapper[4798]: I1011 04:29:41.842979 4798 generic.go:334] "Generic (PLEG): container finished" podID="929d0839-6c1e-4da5-b961-660dd13670d3" containerID="204ce6e294792ca1de6dae205d8d507477fb0dd00ccd2675f9f447a70bf3475d" exitCode=0 Oct 11 04:29:41 crc kubenswrapper[4798]: I1011 04:29:41.843540 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8kc4" event={"ID":"929d0839-6c1e-4da5-b961-660dd13670d3","Type":"ContainerDied","Data":"204ce6e294792ca1de6dae205d8d507477fb0dd00ccd2675f9f447a70bf3475d"} Oct 11 04:29:41 crc kubenswrapper[4798]: I1011 04:29:41.844211 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8kc4" event={"ID":"929d0839-6c1e-4da5-b961-660dd13670d3","Type":"ContainerStarted","Data":"c3bfc0fbaa0db734810c4ba574b3bb13e9b11525fff11cf2c68eeea02318e646"} Oct 11 04:29:42 crc kubenswrapper[4798]: I1011 04:29:42.852840 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8kc4" event={"ID":"929d0839-6c1e-4da5-b961-660dd13670d3","Type":"ContainerStarted","Data":"a6882647b6d5bee28dcd91ab5151e175dfb1d2e9227755e30067fe04e015fb67"} Oct 11 04:29:43 crc kubenswrapper[4798]: I1011 04:29:43.863953 4798 generic.go:334] "Generic (PLEG): container finished" podID="929d0839-6c1e-4da5-b961-660dd13670d3" containerID="a6882647b6d5bee28dcd91ab5151e175dfb1d2e9227755e30067fe04e015fb67" exitCode=0 Oct 11 04:29:43 crc kubenswrapper[4798]: I1011 04:29:43.864829 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8kc4" event={"ID":"929d0839-6c1e-4da5-b961-660dd13670d3","Type":"ContainerDied","Data":"a6882647b6d5bee28dcd91ab5151e175dfb1d2e9227755e30067fe04e015fb67"} Oct 11 04:29:44 crc kubenswrapper[4798]: I1011 04:29:44.882734 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8kc4" event={"ID":"929d0839-6c1e-4da5-b961-660dd13670d3","Type":"ContainerStarted","Data":"6a0e6fdd86d1253007ad58e37035b8747d31223884ff572d5cc7a3e8c3e90743"} Oct 11 04:29:44 crc kubenswrapper[4798]: I1011 04:29:44.903131 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z8kc4" 
podStartSLOduration=2.408580464 podStartE2EDuration="4.903116076s" podCreationTimestamp="2025-10-11 04:29:40 +0000 UTC" firstStartedPulling="2025-10-11 04:29:41.845284128 +0000 UTC m=+2077.181573824" lastFinishedPulling="2025-10-11 04:29:44.33981974 +0000 UTC m=+2079.676109436" observedRunningTime="2025-10-11 04:29:44.900922034 +0000 UTC m=+2080.237211730" watchObservedRunningTime="2025-10-11 04:29:44.903116076 +0000 UTC m=+2080.239405762" Oct 11 04:29:47 crc kubenswrapper[4798]: I1011 04:29:47.528600 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wctwr"] Oct 11 04:29:47 crc kubenswrapper[4798]: I1011 04:29:47.575326 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:29:47 crc kubenswrapper[4798]: I1011 04:29:47.575450 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wctwr"] Oct 11 04:29:47 crc kubenswrapper[4798]: I1011 04:29:47.628566 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b22w7\" (UniqueName: \"kubernetes.io/projected/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-kube-api-access-b22w7\") pod \"redhat-operators-wctwr\" (UID: \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\") " pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:29:47 crc kubenswrapper[4798]: I1011 04:29:47.628878 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-utilities\") pod \"redhat-operators-wctwr\" (UID: \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\") " pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:29:47 crc kubenswrapper[4798]: I1011 04:29:47.629092 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-catalog-content\") pod \"redhat-operators-wctwr\" (UID: \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\") " pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:29:47 crc kubenswrapper[4798]: I1011 04:29:47.731185 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-catalog-content\") pod \"redhat-operators-wctwr\" (UID: \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\") " pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:29:47 crc kubenswrapper[4798]: I1011 04:29:47.731545 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b22w7\" (UniqueName: \"kubernetes.io/projected/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-kube-api-access-b22w7\") pod \"redhat-operators-wctwr\" (UID: \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\") " pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:29:47 crc kubenswrapper[4798]: I1011 04:29:47.731703 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-utilities\") pod \"redhat-operators-wctwr\" (UID: \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\") " pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:29:47 crc kubenswrapper[4798]: I1011 04:29:47.731728 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-catalog-content\") pod \"redhat-operators-wctwr\" (UID: \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\") " pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:29:47 crc kubenswrapper[4798]: I1011 04:29:47.732301 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-utilities\") pod \"redhat-operators-wctwr\" (UID: \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\") " pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:29:47 crc kubenswrapper[4798]: I1011 04:29:47.753380 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b22w7\" (UniqueName: \"kubernetes.io/projected/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-kube-api-access-b22w7\") pod \"redhat-operators-wctwr\" (UID: \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\") " pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:29:47 crc kubenswrapper[4798]: I1011 04:29:47.897945 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:29:48 crc kubenswrapper[4798]: I1011 04:29:48.157938 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wctwr"] Oct 11 04:29:48 crc kubenswrapper[4798]: I1011 04:29:48.915231 4798 generic.go:334] "Generic (PLEG): container finished" podID="dca2ef18-41f0-4b6d-8b39-d6e2b9988274" containerID="6deb78e43fead796034510eca5cc0d12fe57cd8a9ae1e180ea5637f5917579e7" exitCode=0 Oct 11 04:29:48 crc kubenswrapper[4798]: I1011 04:29:48.915561 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wctwr" event={"ID":"dca2ef18-41f0-4b6d-8b39-d6e2b9988274","Type":"ContainerDied","Data":"6deb78e43fead796034510eca5cc0d12fe57cd8a9ae1e180ea5637f5917579e7"} Oct 11 04:29:48 crc kubenswrapper[4798]: I1011 04:29:48.915687 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wctwr" event={"ID":"dca2ef18-41f0-4b6d-8b39-d6e2b9988274","Type":"ContainerStarted","Data":"16983e5d4c40c04b5c1c4c2142b8c89c6eef7446d60c81d0cacad102eade6674"} Oct 11 04:29:49 crc kubenswrapper[4798]: I1011 04:29:49.925403 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wctwr" event={"ID":"dca2ef18-41f0-4b6d-8b39-d6e2b9988274","Type":"ContainerStarted","Data":"31770db84591579d0a434f82d0911e6916e1c2e17e51621d5f007271f105307a"} Oct 11 04:29:50 crc kubenswrapper[4798]: I1011 04:29:50.472640 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:50 crc kubenswrapper[4798]: I1011 04:29:50.472842 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:50 crc kubenswrapper[4798]: I1011 04:29:50.536035 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:51 crc kubenswrapper[4798]: I1011 04:29:51.001205 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:51 crc kubenswrapper[4798]: I1011 04:29:51.962199 4798 generic.go:334] "Generic (PLEG): container finished" podID="dca2ef18-41f0-4b6d-8b39-d6e2b9988274" 
containerID="31770db84591579d0a434f82d0911e6916e1c2e17e51621d5f007271f105307a" exitCode=0 Oct 11 04:29:51 crc kubenswrapper[4798]: I1011 04:29:51.963693 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wctwr" event={"ID":"dca2ef18-41f0-4b6d-8b39-d6e2b9988274","Type":"ContainerDied","Data":"31770db84591579d0a434f82d0911e6916e1c2e17e51621d5f007271f105307a"} Oct 11 04:29:52 crc kubenswrapper[4798]: I1011 04:29:52.895080 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z8kc4"] Oct 11 04:29:52 crc kubenswrapper[4798]: I1011 04:29:52.977833 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wctwr" event={"ID":"dca2ef18-41f0-4b6d-8b39-d6e2b9988274","Type":"ContainerStarted","Data":"133d92e07a5003dfd5006f967b152f9b9b3e47b808ecc386fcd1529f5fc6bfbd"} Oct 11 04:29:52 crc kubenswrapper[4798]: I1011 04:29:52.978094 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z8kc4" podUID="929d0839-6c1e-4da5-b961-660dd13670d3" containerName="registry-server" containerID="cri-o://6a0e6fdd86d1253007ad58e37035b8747d31223884ff572d5cc7a3e8c3e90743" gracePeriod=2 Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.013100 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wctwr" podStartSLOduration=2.387932245 podStartE2EDuration="6.013056641s" podCreationTimestamp="2025-10-11 04:29:47 +0000 UTC" firstStartedPulling="2025-10-11 04:29:48.917840704 +0000 UTC m=+2084.254130390" lastFinishedPulling="2025-10-11 04:29:52.54296509 +0000 UTC m=+2087.879254786" observedRunningTime="2025-10-11 04:29:53.000747403 +0000 UTC m=+2088.337037169" watchObservedRunningTime="2025-10-11 04:29:53.013056641 +0000 UTC m=+2088.349346367" Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.435951 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.591085 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmrv9\" (UniqueName: \"kubernetes.io/projected/929d0839-6c1e-4da5-b961-660dd13670d3-kube-api-access-tmrv9\") pod \"929d0839-6c1e-4da5-b961-660dd13670d3\" (UID: \"929d0839-6c1e-4da5-b961-660dd13670d3\") " Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.591249 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929d0839-6c1e-4da5-b961-660dd13670d3-catalog-content\") pod \"929d0839-6c1e-4da5-b961-660dd13670d3\" (UID: \"929d0839-6c1e-4da5-b961-660dd13670d3\") " Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.591372 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929d0839-6c1e-4da5-b961-660dd13670d3-utilities\") pod \"929d0839-6c1e-4da5-b961-660dd13670d3\" (UID: \"929d0839-6c1e-4da5-b961-660dd13670d3\") " Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.592126 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/929d0839-6c1e-4da5-b961-660dd13670d3-utilities" (OuterVolumeSpecName: "utilities") pod "929d0839-6c1e-4da5-b961-660dd13670d3" (UID: "929d0839-6c1e-4da5-b961-660dd13670d3"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.592561 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929d0839-6c1e-4da5-b961-660dd13670d3-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.608208 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/929d0839-6c1e-4da5-b961-660dd13670d3-kube-api-access-tmrv9" (OuterVolumeSpecName: "kube-api-access-tmrv9") pod "929d0839-6c1e-4da5-b961-660dd13670d3" (UID: "929d0839-6c1e-4da5-b961-660dd13670d3"). InnerVolumeSpecName "kube-api-access-tmrv9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.632500 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/929d0839-6c1e-4da5-b961-660dd13670d3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "929d0839-6c1e-4da5-b961-660dd13670d3" (UID: "929d0839-6c1e-4da5-b961-660dd13670d3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.694692 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmrv9\" (UniqueName: \"kubernetes.io/projected/929d0839-6c1e-4da5-b961-660dd13670d3-kube-api-access-tmrv9\") on node \"crc\" DevicePath \"\"" Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.694735 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929d0839-6c1e-4da5-b961-660dd13670d3-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.995065 4798 generic.go:334] "Generic (PLEG): container finished" podID="929d0839-6c1e-4da5-b961-660dd13670d3" containerID="6a0e6fdd86d1253007ad58e37035b8747d31223884ff572d5cc7a3e8c3e90743" exitCode=0 Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.995179 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z8kc4" Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.995193 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8kc4" event={"ID":"929d0839-6c1e-4da5-b961-660dd13670d3","Type":"ContainerDied","Data":"6a0e6fdd86d1253007ad58e37035b8747d31223884ff572d5cc7a3e8c3e90743"} Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.995949 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8kc4" event={"ID":"929d0839-6c1e-4da5-b961-660dd13670d3","Type":"ContainerDied","Data":"c3bfc0fbaa0db734810c4ba574b3bb13e9b11525fff11cf2c68eeea02318e646"} Oct 11 04:29:53 crc kubenswrapper[4798]: I1011 04:29:53.995981 4798 scope.go:117] "RemoveContainer" containerID="6a0e6fdd86d1253007ad58e37035b8747d31223884ff572d5cc7a3e8c3e90743" Oct 11 04:29:54 crc kubenswrapper[4798]: I1011 04:29:54.045909 4798 scope.go:117] "RemoveContainer" containerID="a6882647b6d5bee28dcd91ab5151e175dfb1d2e9227755e30067fe04e015fb67" Oct 11 04:29:54 crc kubenswrapper[4798]: I1011 04:29:54.054060 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z8kc4"] Oct 11 04:29:54 crc kubenswrapper[4798]: I1011 04:29:54.059853 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z8kc4"] Oct 11 04:29:54 crc kubenswrapper[4798]: I1011 04:29:54.080309 4798 scope.go:117] "RemoveContainer" containerID="204ce6e294792ca1de6dae205d8d507477fb0dd00ccd2675f9f447a70bf3475d" Oct 11 04:29:54 crc kubenswrapper[4798]: I1011 04:29:54.122268 4798 scope.go:117] "RemoveContainer" containerID="6a0e6fdd86d1253007ad58e37035b8747d31223884ff572d5cc7a3e8c3e90743" Oct 11 04:29:54 crc kubenswrapper[4798]: E1011 04:29:54.123547 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a0e6fdd86d1253007ad58e37035b8747d31223884ff572d5cc7a3e8c3e90743\": container with ID starting with 6a0e6fdd86d1253007ad58e37035b8747d31223884ff572d5cc7a3e8c3e90743 not found: ID does not exist" containerID="6a0e6fdd86d1253007ad58e37035b8747d31223884ff572d5cc7a3e8c3e90743" Oct 11 04:29:54 crc kubenswrapper[4798]: I1011 04:29:54.123645 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a0e6fdd86d1253007ad58e37035b8747d31223884ff572d5cc7a3e8c3e90743"} err="failed to get container status \"6a0e6fdd86d1253007ad58e37035b8747d31223884ff572d5cc7a3e8c3e90743\": rpc error: code = NotFound desc = could not find container \"6a0e6fdd86d1253007ad58e37035b8747d31223884ff572d5cc7a3e8c3e90743\": container with ID starting with 6a0e6fdd86d1253007ad58e37035b8747d31223884ff572d5cc7a3e8c3e90743 not found: ID does not exist" Oct 11 04:29:54 crc kubenswrapper[4798]: I1011 04:29:54.123691 4798 scope.go:117] "RemoveContainer" containerID="a6882647b6d5bee28dcd91ab5151e175dfb1d2e9227755e30067fe04e015fb67" Oct 11 04:29:54 crc kubenswrapper[4798]: E1011 04:29:54.124081 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6882647b6d5bee28dcd91ab5151e175dfb1d2e9227755e30067fe04e015fb67\": container with ID starting with a6882647b6d5bee28dcd91ab5151e175dfb1d2e9227755e30067fe04e015fb67 not found: ID does not exist" containerID="a6882647b6d5bee28dcd91ab5151e175dfb1d2e9227755e30067fe04e015fb67" Oct 11 04:29:54 crc kubenswrapper[4798]: I1011 04:29:54.124127 4798 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6882647b6d5bee28dcd91ab5151e175dfb1d2e9227755e30067fe04e015fb67"} err="failed to get container status \"a6882647b6d5bee28dcd91ab5151e175dfb1d2e9227755e30067fe04e015fb67\": rpc error: code = NotFound desc = could not find container \"a6882647b6d5bee28dcd91ab5151e175dfb1d2e9227755e30067fe04e015fb67\": container with ID starting with a6882647b6d5bee28dcd91ab5151e175dfb1d2e9227755e30067fe04e015fb67 not found: ID does not exist" Oct 11 04:29:54 crc kubenswrapper[4798]: I1011 04:29:54.124154 4798 scope.go:117] "RemoveContainer" containerID="204ce6e294792ca1de6dae205d8d507477fb0dd00ccd2675f9f447a70bf3475d" Oct 11 04:29:54 crc kubenswrapper[4798]: E1011 04:29:54.124558 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"204ce6e294792ca1de6dae205d8d507477fb0dd00ccd2675f9f447a70bf3475d\": container with ID starting with 204ce6e294792ca1de6dae205d8d507477fb0dd00ccd2675f9f447a70bf3475d not found: ID does not exist" containerID="204ce6e294792ca1de6dae205d8d507477fb0dd00ccd2675f9f447a70bf3475d" Oct 11 04:29:54 crc kubenswrapper[4798]: I1011 04:29:54.124596 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"204ce6e294792ca1de6dae205d8d507477fb0dd00ccd2675f9f447a70bf3475d"} err="failed to get container status \"204ce6e294792ca1de6dae205d8d507477fb0dd00ccd2675f9f447a70bf3475d\": rpc error: code = NotFound desc = could not find container \"204ce6e294792ca1de6dae205d8d507477fb0dd00ccd2675f9f447a70bf3475d\": container with ID starting with 204ce6e294792ca1de6dae205d8d507477fb0dd00ccd2675f9f447a70bf3475d not found: ID does not exist" Oct 11 04:29:55 crc kubenswrapper[4798]: I1011 04:29:55.438358 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="929d0839-6c1e-4da5-b961-660dd13670d3" path="/var/lib/kubelet/pods/929d0839-6c1e-4da5-b961-660dd13670d3/volumes" Oct 11 04:29:57 crc kubenswrapper[4798]: I1011 04:29:57.898494 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:29:57 crc kubenswrapper[4798]: I1011 04:29:57.898784 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:29:58 crc kubenswrapper[4798]: I1011 04:29:58.969232 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wctwr" podUID="dca2ef18-41f0-4b6d-8b39-d6e2b9988274" containerName="registry-server" probeResult="failure" output=< Oct 11 04:29:58 crc kubenswrapper[4798]: timeout: failed to connect service ":50051" within 1s Oct 11 04:29:58 crc kubenswrapper[4798]: > Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.159975 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm"] Oct 11 04:30:00 crc kubenswrapper[4798]: E1011 04:30:00.160824 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="929d0839-6c1e-4da5-b961-660dd13670d3" containerName="registry-server" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.160859 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="929d0839-6c1e-4da5-b961-660dd13670d3" containerName="registry-server" Oct 11 04:30:00 crc kubenswrapper[4798]: E1011 04:30:00.160897 4798 cpu_manager.go:410] "RemoveStaleState: removing container" 
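
The error-level lines at 04:29:54 above look alarming but appear to be a routine double delete: the RemoveContainer path asks CRI-O about containers that the PLEG-driven cleanup already removed, so the runtime answers code = NotFound and pod_container_deletor simply records it. The cpu_manager/memory_manager "RemoveStaleState" E-lines seen at each pod admission read the same way: state purges for pods that are already gone. A sketch that tallies these signatures so any E-level line outside them stands out — treating them as benign is my reading of this capture, not something the log itself states:

    package main

    import (
        "bufio"
        "fmt"
        "log"
        "os"
        "strings"
    )

    func main() {
        // Error-level signatures that, in this capture, look like routine
        // cleanup races rather than failures.
        benign := []string{
            `desc = could not find container`,       // RemoveContainer vs. PLEG double delete
            `"RemoveStaleState: removing container`, // cpu/memory manager purging departed pods
        }
        f, err := os.Open("kubelet.log")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        sc := bufio.NewScanner(f)
        sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
        counts := make(map[string]int, len(benign))
        for sc.Scan() {
            for _, sig := range benign {
                counts[sig] += strings.Count(sc.Text(), sig)
            }
        }
        if err := sc.Err(); err != nil {
            log.Fatal(err)
        }
        for _, sig := range benign {
            fmt.Printf("%6d  %s\n", counts[sig], sig)
        }
    }
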
podUID="929d0839-6c1e-4da5-b961-660dd13670d3" containerName="extract-utilities" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.160915 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="929d0839-6c1e-4da5-b961-660dd13670d3" containerName="extract-utilities" Oct 11 04:30:00 crc kubenswrapper[4798]: E1011 04:30:00.160964 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="929d0839-6c1e-4da5-b961-660dd13670d3" containerName="extract-content" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.160983 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="929d0839-6c1e-4da5-b961-660dd13670d3" containerName="extract-content" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.161469 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="929d0839-6c1e-4da5-b961-660dd13670d3" containerName="registry-server" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.163035 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.167594 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.175810 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm"] Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.180590 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.244240 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hpxs\" (UniqueName: \"kubernetes.io/projected/4a610842-834e-4254-a0f1-861cd73386b3-kube-api-access-8hpxs\") pod \"collect-profiles-29335950-4lvhm\" (UID: \"4a610842-834e-4254-a0f1-861cd73386b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.244771 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a610842-834e-4254-a0f1-861cd73386b3-secret-volume\") pod \"collect-profiles-29335950-4lvhm\" (UID: \"4a610842-834e-4254-a0f1-861cd73386b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.244852 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a610842-834e-4254-a0f1-861cd73386b3-config-volume\") pod \"collect-profiles-29335950-4lvhm\" (UID: \"4a610842-834e-4254-a0f1-861cd73386b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.347202 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hpxs\" (UniqueName: \"kubernetes.io/projected/4a610842-834e-4254-a0f1-861cd73386b3-kube-api-access-8hpxs\") pod \"collect-profiles-29335950-4lvhm\" (UID: \"4a610842-834e-4254-a0f1-861cd73386b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.347445 4798 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a610842-834e-4254-a0f1-861cd73386b3-secret-volume\") pod \"collect-profiles-29335950-4lvhm\" (UID: \"4a610842-834e-4254-a0f1-861cd73386b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.347586 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a610842-834e-4254-a0f1-861cd73386b3-config-volume\") pod \"collect-profiles-29335950-4lvhm\" (UID: \"4a610842-834e-4254-a0f1-861cd73386b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.348737 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a610842-834e-4254-a0f1-861cd73386b3-config-volume\") pod \"collect-profiles-29335950-4lvhm\" (UID: \"4a610842-834e-4254-a0f1-861cd73386b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.355151 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a610842-834e-4254-a0f1-861cd73386b3-secret-volume\") pod \"collect-profiles-29335950-4lvhm\" (UID: \"4a610842-834e-4254-a0f1-861cd73386b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.364880 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hpxs\" (UniqueName: \"kubernetes.io/projected/4a610842-834e-4254-a0f1-861cd73386b3-kube-api-access-8hpxs\") pod \"collect-profiles-29335950-4lvhm\" (UID: \"4a610842-834e-4254-a0f1-861cd73386b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.501162 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" Oct 11 04:30:00 crc kubenswrapper[4798]: I1011 04:30:00.981256 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm"] Oct 11 04:30:01 crc kubenswrapper[4798]: I1011 04:30:01.057423 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" event={"ID":"4a610842-834e-4254-a0f1-861cd73386b3","Type":"ContainerStarted","Data":"39d8087d2e9852da390d507e2001880486901a55585098ef0ed11bcdf532f55a"} Oct 11 04:30:02 crc kubenswrapper[4798]: I1011 04:30:02.066886 4798 generic.go:334] "Generic (PLEG): container finished" podID="4a610842-834e-4254-a0f1-861cd73386b3" containerID="6a2c0d931704f8a25630e68d91d358706e5b93ee2014e6b76269fc11a97ceb2b" exitCode=0 Oct 11 04:30:02 crc kubenswrapper[4798]: I1011 04:30:02.066931 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" event={"ID":"4a610842-834e-4254-a0f1-861cd73386b3","Type":"ContainerDied","Data":"6a2c0d931704f8a25630e68d91d358706e5b93ee2014e6b76269fc11a97ceb2b"} Oct 11 04:30:03 crc kubenswrapper[4798]: I1011 04:30:03.430923 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" Oct 11 04:30:03 crc kubenswrapper[4798]: I1011 04:30:03.445350 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a610842-834e-4254-a0f1-861cd73386b3-secret-volume\") pod \"4a610842-834e-4254-a0f1-861cd73386b3\" (UID: \"4a610842-834e-4254-a0f1-861cd73386b3\") " Oct 11 04:30:03 crc kubenswrapper[4798]: I1011 04:30:03.445548 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a610842-834e-4254-a0f1-861cd73386b3-config-volume\") pod \"4a610842-834e-4254-a0f1-861cd73386b3\" (UID: \"4a610842-834e-4254-a0f1-861cd73386b3\") " Oct 11 04:30:03 crc kubenswrapper[4798]: I1011 04:30:03.445610 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hpxs\" (UniqueName: \"kubernetes.io/projected/4a610842-834e-4254-a0f1-861cd73386b3-kube-api-access-8hpxs\") pod \"4a610842-834e-4254-a0f1-861cd73386b3\" (UID: \"4a610842-834e-4254-a0f1-861cd73386b3\") " Oct 11 04:30:03 crc kubenswrapper[4798]: I1011 04:30:03.447228 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a610842-834e-4254-a0f1-861cd73386b3-config-volume" (OuterVolumeSpecName: "config-volume") pod "4a610842-834e-4254-a0f1-861cd73386b3" (UID: "4a610842-834e-4254-a0f1-861cd73386b3"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:30:03 crc kubenswrapper[4798]: I1011 04:30:03.453491 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a610842-834e-4254-a0f1-861cd73386b3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4a610842-834e-4254-a0f1-861cd73386b3" (UID: "4a610842-834e-4254-a0f1-861cd73386b3"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:30:03 crc kubenswrapper[4798]: I1011 04:30:03.453550 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a610842-834e-4254-a0f1-861cd73386b3-kube-api-access-8hpxs" (OuterVolumeSpecName: "kube-api-access-8hpxs") pod "4a610842-834e-4254-a0f1-861cd73386b3" (UID: "4a610842-834e-4254-a0f1-861cd73386b3"). InnerVolumeSpecName "kube-api-access-8hpxs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:30:03 crc kubenswrapper[4798]: I1011 04:30:03.548056 4798 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a610842-834e-4254-a0f1-861cd73386b3-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 04:30:03 crc kubenswrapper[4798]: I1011 04:30:03.548090 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hpxs\" (UniqueName: \"kubernetes.io/projected/4a610842-834e-4254-a0f1-861cd73386b3-kube-api-access-8hpxs\") on node \"crc\" DevicePath \"\"" Oct 11 04:30:03 crc kubenswrapper[4798]: I1011 04:30:03.548102 4798 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a610842-834e-4254-a0f1-861cd73386b3-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 04:30:04 crc kubenswrapper[4798]: I1011 04:30:04.085286 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" event={"ID":"4a610842-834e-4254-a0f1-861cd73386b3","Type":"ContainerDied","Data":"39d8087d2e9852da390d507e2001880486901a55585098ef0ed11bcdf532f55a"} Oct 11 04:30:04 crc kubenswrapper[4798]: I1011 04:30:04.085324 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39d8087d2e9852da390d507e2001880486901a55585098ef0ed11bcdf532f55a" Oct 11 04:30:04 crc kubenswrapper[4798]: I1011 04:30:04.085370 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335950-4lvhm" Oct 11 04:30:04 crc kubenswrapper[4798]: I1011 04:30:04.507184 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx"] Oct 11 04:30:04 crc kubenswrapper[4798]: I1011 04:30:04.512719 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335905-vgmmx"] Oct 11 04:30:05 crc kubenswrapper[4798]: I1011 04:30:05.441589 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd" path="/var/lib/kubelet/pods/e5439d4a-8ce5-48c3-b0db-9e4b24ebfcdd/volumes" Oct 11 04:30:07 crc kubenswrapper[4798]: I1011 04:30:07.964246 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:30:08 crc kubenswrapper[4798]: I1011 04:30:08.030510 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:30:08 crc kubenswrapper[4798]: I1011 04:30:08.214521 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wctwr"] Oct 11 04:30:09 crc kubenswrapper[4798]: I1011 04:30:09.131377 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wctwr" podUID="dca2ef18-41f0-4b6d-8b39-d6e2b9988274" containerName="registry-server" containerID="cri-o://133d92e07a5003dfd5006f967b152f9b9b3e47b808ecc386fcd1529f5fc6bfbd" gracePeriod=2 Oct 11 04:30:09 crc kubenswrapper[4798]: I1011 04:30:09.594577 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:30:09 crc kubenswrapper[4798]: I1011 04:30:09.663459 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-utilities\") pod \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\" (UID: \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\") " Oct 11 04:30:09 crc kubenswrapper[4798]: I1011 04:30:09.663579 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b22w7\" (UniqueName: \"kubernetes.io/projected/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-kube-api-access-b22w7\") pod \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\" (UID: \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\") " Oct 11 04:30:09 crc kubenswrapper[4798]: I1011 04:30:09.663939 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-catalog-content\") pod \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\" (UID: \"dca2ef18-41f0-4b6d-8b39-d6e2b9988274\") " Oct 11 04:30:09 crc kubenswrapper[4798]: I1011 04:30:09.665304 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-utilities" (OuterVolumeSpecName: "utilities") pod "dca2ef18-41f0-4b6d-8b39-d6e2b9988274" (UID: "dca2ef18-41f0-4b6d-8b39-d6e2b9988274"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:30:09 crc kubenswrapper[4798]: I1011 04:30:09.674054 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-kube-api-access-b22w7" (OuterVolumeSpecName: "kube-api-access-b22w7") pod "dca2ef18-41f0-4b6d-8b39-d6e2b9988274" (UID: "dca2ef18-41f0-4b6d-8b39-d6e2b9988274"). InnerVolumeSpecName "kube-api-access-b22w7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:30:09 crc kubenswrapper[4798]: I1011 04:30:09.756774 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dca2ef18-41f0-4b6d-8b39-d6e2b9988274" (UID: "dca2ef18-41f0-4b6d-8b39-d6e2b9988274"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:30:09 crc kubenswrapper[4798]: I1011 04:30:09.770265 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:30:09 crc kubenswrapper[4798]: I1011 04:30:09.770298 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:30:09 crc kubenswrapper[4798]: I1011 04:30:09.770310 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b22w7\" (UniqueName: \"kubernetes.io/projected/dca2ef18-41f0-4b6d-8b39-d6e2b9988274-kube-api-access-b22w7\") on node \"crc\" DevicePath \"\"" Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.143576 4798 generic.go:334] "Generic (PLEG): container finished" podID="dca2ef18-41f0-4b6d-8b39-d6e2b9988274" containerID="133d92e07a5003dfd5006f967b152f9b9b3e47b808ecc386fcd1529f5fc6bfbd" exitCode=0 Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.143628 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wctwr" event={"ID":"dca2ef18-41f0-4b6d-8b39-d6e2b9988274","Type":"ContainerDied","Data":"133d92e07a5003dfd5006f967b152f9b9b3e47b808ecc386fcd1529f5fc6bfbd"} Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.143671 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wctwr" event={"ID":"dca2ef18-41f0-4b6d-8b39-d6e2b9988274","Type":"ContainerDied","Data":"16983e5d4c40c04b5c1c4c2142b8c89c6eef7446d60c81d0cacad102eade6674"} Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.143693 4798 scope.go:117] "RemoveContainer" containerID="133d92e07a5003dfd5006f967b152f9b9b3e47b808ecc386fcd1529f5fc6bfbd" Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.143693 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wctwr" Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.166973 4798 scope.go:117] "RemoveContainer" containerID="31770db84591579d0a434f82d0911e6916e1c2e17e51621d5f007271f105307a" Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.187700 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wctwr"] Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.196866 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wctwr"] Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.202766 4798 scope.go:117] "RemoveContainer" containerID="6deb78e43fead796034510eca5cc0d12fe57cd8a9ae1e180ea5637f5917579e7" Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.238724 4798 scope.go:117] "RemoveContainer" containerID="133d92e07a5003dfd5006f967b152f9b9b3e47b808ecc386fcd1529f5fc6bfbd" Oct 11 04:30:10 crc kubenswrapper[4798]: E1011 04:30:10.239428 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"133d92e07a5003dfd5006f967b152f9b9b3e47b808ecc386fcd1529f5fc6bfbd\": container with ID starting with 133d92e07a5003dfd5006f967b152f9b9b3e47b808ecc386fcd1529f5fc6bfbd not found: ID does not exist" containerID="133d92e07a5003dfd5006f967b152f9b9b3e47b808ecc386fcd1529f5fc6bfbd" Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.239484 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"133d92e07a5003dfd5006f967b152f9b9b3e47b808ecc386fcd1529f5fc6bfbd"} err="failed to get container status \"133d92e07a5003dfd5006f967b152f9b9b3e47b808ecc386fcd1529f5fc6bfbd\": rpc error: code = NotFound desc = could not find container \"133d92e07a5003dfd5006f967b152f9b9b3e47b808ecc386fcd1529f5fc6bfbd\": container with ID starting with 133d92e07a5003dfd5006f967b152f9b9b3e47b808ecc386fcd1529f5fc6bfbd not found: ID does not exist" Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.239513 4798 scope.go:117] "RemoveContainer" containerID="31770db84591579d0a434f82d0911e6916e1c2e17e51621d5f007271f105307a" Oct 11 04:30:10 crc kubenswrapper[4798]: E1011 04:30:10.240221 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31770db84591579d0a434f82d0911e6916e1c2e17e51621d5f007271f105307a\": container with ID starting with 31770db84591579d0a434f82d0911e6916e1c2e17e51621d5f007271f105307a not found: ID does not exist" containerID="31770db84591579d0a434f82d0911e6916e1c2e17e51621d5f007271f105307a" Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.240273 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31770db84591579d0a434f82d0911e6916e1c2e17e51621d5f007271f105307a"} err="failed to get container status \"31770db84591579d0a434f82d0911e6916e1c2e17e51621d5f007271f105307a\": rpc error: code = NotFound desc = could not find container \"31770db84591579d0a434f82d0911e6916e1c2e17e51621d5f007271f105307a\": container with ID starting with 31770db84591579d0a434f82d0911e6916e1c2e17e51621d5f007271f105307a not found: ID does not exist" Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.240296 4798 scope.go:117] "RemoveContainer" containerID="6deb78e43fead796034510eca5cc0d12fe57cd8a9ae1e180ea5637f5917579e7" Oct 11 04:30:10 crc kubenswrapper[4798]: E1011 04:30:10.240698 4798 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"6deb78e43fead796034510eca5cc0d12fe57cd8a9ae1e180ea5637f5917579e7\": container with ID starting with 6deb78e43fead796034510eca5cc0d12fe57cd8a9ae1e180ea5637f5917579e7 not found: ID does not exist" containerID="6deb78e43fead796034510eca5cc0d12fe57cd8a9ae1e180ea5637f5917579e7" Oct 11 04:30:10 crc kubenswrapper[4798]: I1011 04:30:10.240722 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6deb78e43fead796034510eca5cc0d12fe57cd8a9ae1e180ea5637f5917579e7"} err="failed to get container status \"6deb78e43fead796034510eca5cc0d12fe57cd8a9ae1e180ea5637f5917579e7\": rpc error: code = NotFound desc = could not find container \"6deb78e43fead796034510eca5cc0d12fe57cd8a9ae1e180ea5637f5917579e7\": container with ID starting with 6deb78e43fead796034510eca5cc0d12fe57cd8a9ae1e180ea5637f5917579e7 not found: ID does not exist" Oct 11 04:30:11 crc kubenswrapper[4798]: I1011 04:30:11.447941 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dca2ef18-41f0-4b6d-8b39-d6e2b9988274" path="/var/lib/kubelet/pods/dca2ef18-41f0-4b6d-8b39-d6e2b9988274/volumes" Oct 11 04:30:21 crc kubenswrapper[4798]: I1011 04:30:21.443986 4798 scope.go:117] "RemoveContainer" containerID="17bf07656456c1c694e9e48b11a6ddcdbe0f2b027a8abf029615aff20323b4bd" Oct 11 04:30:21 crc kubenswrapper[4798]: I1011 04:30:21.464952 4798 scope.go:117] "RemoveContainer" containerID="3d1456356390a4fd7657806f5dcb13eabe8d0cbd1a59d84d676145cec492ddf4" Oct 11 04:30:21 crc kubenswrapper[4798]: I1011 04:30:21.513910 4798 scope.go:117] "RemoveContainer" containerID="d5e8117ebe8ddd8470bde9b0f4953bc83c06f88fe182f5343c6a7f41fe98133d" Oct 11 04:30:21 crc kubenswrapper[4798]: I1011 04:30:21.555296 4798 scope.go:117] "RemoveContainer" containerID="e8ca87499a6303c18f9a4940a14639daabe9581c876c120adb9853a3ddfcec42" Oct 11 04:30:27 crc kubenswrapper[4798]: I1011 04:30:27.139021 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:30:27 crc kubenswrapper[4798]: I1011 04:30:27.139831 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:30:44 crc kubenswrapper[4798]: I1011 04:30:44.464218 4798 generic.go:334] "Generic (PLEG): container finished" podID="6c616614-d70b-48a1-864f-df99b3bb33f3" containerID="2a50336ab7851f3b50d10736db8d64a8a4130f1ad25db424e087799826aa8fca" exitCode=0 Oct 11 04:30:44 crc kubenswrapper[4798]: I1011 04:30:44.464271 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" event={"ID":"6c616614-d70b-48a1-864f-df99b3bb33f3","Type":"ContainerDied","Data":"2a50336ab7851f3b50d10736db8d64a8a4130f1ad25db424e087799826aa8fca"} Oct 11 04:30:45 crc kubenswrapper[4798]: I1011 04:30:45.918283 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:30:45 crc kubenswrapper[4798]: I1011 04:30:45.993988 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-ssh-key\") pod \"6c616614-d70b-48a1-864f-df99b3bb33f3\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " Oct 11 04:30:45 crc kubenswrapper[4798]: I1011 04:30:45.994165 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-ceph\") pod \"6c616614-d70b-48a1-864f-df99b3bb33f3\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " Oct 11 04:30:45 crc kubenswrapper[4798]: I1011 04:30:45.994225 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjgz4\" (UniqueName: \"kubernetes.io/projected/6c616614-d70b-48a1-864f-df99b3bb33f3-kube-api-access-fjgz4\") pod \"6c616614-d70b-48a1-864f-df99b3bb33f3\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " Oct 11 04:30:45 crc kubenswrapper[4798]: I1011 04:30:45.994253 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-bootstrap-combined-ca-bundle\") pod \"6c616614-d70b-48a1-864f-df99b3bb33f3\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " Oct 11 04:30:45 crc kubenswrapper[4798]: I1011 04:30:45.994345 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-inventory\") pod \"6c616614-d70b-48a1-864f-df99b3bb33f3\" (UID: \"6c616614-d70b-48a1-864f-df99b3bb33f3\") " Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.000813 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c616614-d70b-48a1-864f-df99b3bb33f3-kube-api-access-fjgz4" (OuterVolumeSpecName: "kube-api-access-fjgz4") pod "6c616614-d70b-48a1-864f-df99b3bb33f3" (UID: "6c616614-d70b-48a1-864f-df99b3bb33f3"). InnerVolumeSpecName "kube-api-access-fjgz4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.000842 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "6c616614-d70b-48a1-864f-df99b3bb33f3" (UID: "6c616614-d70b-48a1-864f-df99b3bb33f3"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.007725 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-ceph" (OuterVolumeSpecName: "ceph") pod "6c616614-d70b-48a1-864f-df99b3bb33f3" (UID: "6c616614-d70b-48a1-864f-df99b3bb33f3"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.022883 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-inventory" (OuterVolumeSpecName: "inventory") pod "6c616614-d70b-48a1-864f-df99b3bb33f3" (UID: "6c616614-d70b-48a1-864f-df99b3bb33f3"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.024155 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6c616614-d70b-48a1-864f-df99b3bb33f3" (UID: "6c616614-d70b-48a1-864f-df99b3bb33f3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.097746 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.097786 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.097796 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjgz4\" (UniqueName: \"kubernetes.io/projected/6c616614-d70b-48a1-864f-df99b3bb33f3-kube-api-access-fjgz4\") on node \"crc\" DevicePath \"\"" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.097808 4798 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.097818 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6c616614-d70b-48a1-864f-df99b3bb33f3-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.482034 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" event={"ID":"6c616614-d70b-48a1-864f-df99b3bb33f3","Type":"ContainerDied","Data":"646a94d322ae4eb7d4ab3b63bc5fed88dd6e82b4d726d619b1ba2ab6cd8ac602"} Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.482074 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="646a94d322ae4eb7d4ab3b63bc5fed88dd6e82b4d726d619b1ba2ab6cd8ac602" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.482157 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-msblf" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.586680 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj"] Oct 11 04:30:46 crc kubenswrapper[4798]: E1011 04:30:46.587122 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dca2ef18-41f0-4b6d-8b39-d6e2b9988274" containerName="extract-content" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.587151 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="dca2ef18-41f0-4b6d-8b39-d6e2b9988274" containerName="extract-content" Oct 11 04:30:46 crc kubenswrapper[4798]: E1011 04:30:46.587170 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dca2ef18-41f0-4b6d-8b39-d6e2b9988274" containerName="registry-server" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.587179 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="dca2ef18-41f0-4b6d-8b39-d6e2b9988274" containerName="registry-server" Oct 11 04:30:46 crc kubenswrapper[4798]: E1011 04:30:46.587196 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dca2ef18-41f0-4b6d-8b39-d6e2b9988274" containerName="extract-utilities" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.587204 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="dca2ef18-41f0-4b6d-8b39-d6e2b9988274" containerName="extract-utilities" Oct 11 04:30:46 crc kubenswrapper[4798]: E1011 04:30:46.587227 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c616614-d70b-48a1-864f-df99b3bb33f3" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.587237 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c616614-d70b-48a1-864f-df99b3bb33f3" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Oct 11 04:30:46 crc kubenswrapper[4798]: E1011 04:30:46.587254 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a610842-834e-4254-a0f1-861cd73386b3" containerName="collect-profiles" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.587262 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a610842-834e-4254-a0f1-861cd73386b3" containerName="collect-profiles" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.587494 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a610842-834e-4254-a0f1-861cd73386b3" containerName="collect-profiles" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.587514 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c616614-d70b-48a1-864f-df99b3bb33f3" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.587529 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="dca2ef18-41f0-4b6d-8b39-d6e2b9988274" containerName="registry-server" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.588276 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.592939 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.593063 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.593294 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.593600 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.594039 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.615193 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj"] Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.707174 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7xrc\" (UniqueName: \"kubernetes.io/projected/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-kube-api-access-w7xrc\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.707227 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.707300 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.707358 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.809089 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7xrc\" (UniqueName: \"kubernetes.io/projected/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-kube-api-access-w7xrc\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.809477 4798 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.809507 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.809564 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.813015 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.813188 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.817939 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.825986 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7xrc\" (UniqueName: \"kubernetes.io/projected/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-kube-api-access-w7xrc\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:46 crc kubenswrapper[4798]: I1011 04:30:46.903957 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:30:47 crc kubenswrapper[4798]: I1011 04:30:47.439006 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj"] Oct 11 04:30:47 crc kubenswrapper[4798]: I1011 04:30:47.492134 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" event={"ID":"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6","Type":"ContainerStarted","Data":"7f842f252bc9e74841191ac637d2e6452b50db57666c0f57daaf737c30dbd060"} Oct 11 04:30:48 crc kubenswrapper[4798]: I1011 04:30:48.510296 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" event={"ID":"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6","Type":"ContainerStarted","Data":"e8b5d06dba47c7757f8c9d9b50ae970c318625e94e418072092561822518ab21"} Oct 11 04:30:48 crc kubenswrapper[4798]: I1011 04:30:48.540833 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" podStartSLOduration=2.09572392 podStartE2EDuration="2.540811831s" podCreationTimestamp="2025-10-11 04:30:46 +0000 UTC" firstStartedPulling="2025-10-11 04:30:47.430336951 +0000 UTC m=+2142.766626637" lastFinishedPulling="2025-10-11 04:30:47.875424862 +0000 UTC m=+2143.211714548" observedRunningTime="2025-10-11 04:30:48.536323103 +0000 UTC m=+2143.872612829" watchObservedRunningTime="2025-10-11 04:30:48.540811831 +0000 UTC m=+2143.877101527" Oct 11 04:30:57 crc kubenswrapper[4798]: I1011 04:30:57.138442 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:30:57 crc kubenswrapper[4798]: I1011 04:30:57.139122 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:31:14 crc kubenswrapper[4798]: I1011 04:31:14.753876 4798 generic.go:334] "Generic (PLEG): container finished" podID="c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6" containerID="e8b5d06dba47c7757f8c9d9b50ae970c318625e94e418072092561822518ab21" exitCode=0 Oct 11 04:31:14 crc kubenswrapper[4798]: I1011 04:31:14.754011 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" event={"ID":"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6","Type":"ContainerDied","Data":"e8b5d06dba47c7757f8c9d9b50ae970c318625e94e418072092561822518ab21"} Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.174472 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.212908 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-ceph\") pod \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.212973 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-inventory\") pod \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.213053 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7xrc\" (UniqueName: \"kubernetes.io/projected/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-kube-api-access-w7xrc\") pod \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.213078 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-ssh-key\") pod \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\" (UID: \"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6\") " Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.225636 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-ceph" (OuterVolumeSpecName: "ceph") pod "c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6" (UID: "c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.225751 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-kube-api-access-w7xrc" (OuterVolumeSpecName: "kube-api-access-w7xrc") pod "c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6" (UID: "c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6"). InnerVolumeSpecName "kube-api-access-w7xrc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.245113 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-inventory" (OuterVolumeSpecName: "inventory") pod "c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6" (UID: "c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.245829 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6" (UID: "c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.315119 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.315150 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.315163 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7xrc\" (UniqueName: \"kubernetes.io/projected/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-kube-api-access-w7xrc\") on node \"crc\" DevicePath \"\"" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.315174 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.772119 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" event={"ID":"c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6","Type":"ContainerDied","Data":"7f842f252bc9e74841191ac637d2e6452b50db57666c0f57daaf737c30dbd060"} Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.772511 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f842f252bc9e74841191ac637d2e6452b50db57666c0f57daaf737c30dbd060" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.772154 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.869535 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949"] Oct 11 04:31:16 crc kubenswrapper[4798]: E1011 04:31:16.870107 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.870123 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.870334 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.870947 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.879040 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.879057 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.879169 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.879567 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.879816 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.887459 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949"] Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.924527 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bb949\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.924977 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bb949\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.925153 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bb949\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:16 crc kubenswrapper[4798]: I1011 04:31:16.925415 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcxvn\" (UniqueName: \"kubernetes.io/projected/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-kube-api-access-wcxvn\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bb949\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:17 crc kubenswrapper[4798]: I1011 04:31:17.027866 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcxvn\" (UniqueName: \"kubernetes.io/projected/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-kube-api-access-wcxvn\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bb949\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:17 crc kubenswrapper[4798]: I1011 04:31:17.027982 4798 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bb949\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:17 crc kubenswrapper[4798]: I1011 04:31:17.028172 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bb949\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:17 crc kubenswrapper[4798]: I1011 04:31:17.028231 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bb949\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:17 crc kubenswrapper[4798]: I1011 04:31:17.033916 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bb949\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:17 crc kubenswrapper[4798]: I1011 04:31:17.034017 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bb949\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:17 crc kubenswrapper[4798]: I1011 04:31:17.034538 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bb949\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:17 crc kubenswrapper[4798]: I1011 04:31:17.045043 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcxvn\" (UniqueName: \"kubernetes.io/projected/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-kube-api-access-wcxvn\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bb949\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:17 crc kubenswrapper[4798]: I1011 04:31:17.189851 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:17 crc kubenswrapper[4798]: I1011 04:31:17.838028 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949"] Oct 11 04:31:18 crc kubenswrapper[4798]: I1011 04:31:18.796187 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" event={"ID":"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee","Type":"ContainerStarted","Data":"9157a491a6d9949a1d3246a11e5aabf242a4269b73165d76cd8d894706b5f2c1"} Oct 11 04:31:18 crc kubenswrapper[4798]: I1011 04:31:18.796709 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" event={"ID":"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee","Type":"ContainerStarted","Data":"717e19eaa2b73fffab87e41136b2400885520f608ac62856043efc0bea859a40"} Oct 11 04:31:23 crc kubenswrapper[4798]: I1011 04:31:23.848990 4798 generic.go:334] "Generic (PLEG): container finished" podID="57b977e1-ad34-4a35-a31e-fe6e1a0b32ee" containerID="9157a491a6d9949a1d3246a11e5aabf242a4269b73165d76cd8d894706b5f2c1" exitCode=0 Oct 11 04:31:23 crc kubenswrapper[4798]: I1011 04:31:23.849118 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" event={"ID":"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee","Type":"ContainerDied","Data":"9157a491a6d9949a1d3246a11e5aabf242a4269b73165d76cd8d894706b5f2c1"} Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.307462 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.393964 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-ssh-key\") pod \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.394098 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-inventory\") pod \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.394129 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-ceph\") pod \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.394280 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wcxvn\" (UniqueName: \"kubernetes.io/projected/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-kube-api-access-wcxvn\") pod \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\" (UID: \"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee\") " Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.413328 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-ceph" (OuterVolumeSpecName: "ceph") pod "57b977e1-ad34-4a35-a31e-fe6e1a0b32ee" (UID: "57b977e1-ad34-4a35-a31e-fe6e1a0b32ee"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.413546 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-kube-api-access-wcxvn" (OuterVolumeSpecName: "kube-api-access-wcxvn") pod "57b977e1-ad34-4a35-a31e-fe6e1a0b32ee" (UID: "57b977e1-ad34-4a35-a31e-fe6e1a0b32ee"). InnerVolumeSpecName "kube-api-access-wcxvn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.429617 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-inventory" (OuterVolumeSpecName: "inventory") pod "57b977e1-ad34-4a35-a31e-fe6e1a0b32ee" (UID: "57b977e1-ad34-4a35-a31e-fe6e1a0b32ee"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.438143 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "57b977e1-ad34-4a35-a31e-fe6e1a0b32ee" (UID: "57b977e1-ad34-4a35-a31e-fe6e1a0b32ee"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.496777 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wcxvn\" (UniqueName: \"kubernetes.io/projected/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-kube-api-access-wcxvn\") on node \"crc\" DevicePath \"\"" Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.496812 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.496821 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.496830 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/57b977e1-ad34-4a35-a31e-fe6e1a0b32ee-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.877028 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" event={"ID":"57b977e1-ad34-4a35-a31e-fe6e1a0b32ee","Type":"ContainerDied","Data":"717e19eaa2b73fffab87e41136b2400885520f608ac62856043efc0bea859a40"} Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.877531 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="717e19eaa2b73fffab87e41136b2400885520f608ac62856043efc0bea859a40" Oct 11 04:31:25 crc kubenswrapper[4798]: I1011 04:31:25.877092 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bb949" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.038693 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm"] Oct 11 04:31:26 crc kubenswrapper[4798]: E1011 04:31:26.039166 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57b977e1-ad34-4a35-a31e-fe6e1a0b32ee" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.039186 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="57b977e1-ad34-4a35-a31e-fe6e1a0b32ee" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.039470 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="57b977e1-ad34-4a35-a31e-fe6e1a0b32ee" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.040091 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.044041 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.044100 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.044175 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.044245 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.044122 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.048714 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm"] Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.209603 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4fcfm\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.209898 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4fcfm\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.210086 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qngv\" (UniqueName: \"kubernetes.io/projected/c8977d13-c86f-494d-a02e-b617e5e27fdb-kube-api-access-2qngv\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4fcfm\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " 
pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.210185 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4fcfm\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.311752 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qngv\" (UniqueName: \"kubernetes.io/projected/c8977d13-c86f-494d-a02e-b617e5e27fdb-kube-api-access-2qngv\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4fcfm\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.311811 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4fcfm\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.311855 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4fcfm\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.311880 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4fcfm\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.316121 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4fcfm\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.316142 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4fcfm\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.320035 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4fcfm\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.327467 4798 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2qngv\" (UniqueName: \"kubernetes.io/projected/c8977d13-c86f-494d-a02e-b617e5e27fdb-kube-api-access-2qngv\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4fcfm\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.362951 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.851275 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm"] Oct 11 04:31:26 crc kubenswrapper[4798]: I1011 04:31:26.890789 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" event={"ID":"c8977d13-c86f-494d-a02e-b617e5e27fdb","Type":"ContainerStarted","Data":"9901d81d2d669c71cf03826ed69e03e45f2f6744e5394084899ad38a4ce4f015"} Oct 11 04:31:27 crc kubenswrapper[4798]: I1011 04:31:27.138022 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:31:27 crc kubenswrapper[4798]: I1011 04:31:27.138463 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:31:27 crc kubenswrapper[4798]: I1011 04:31:27.138510 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 04:31:27 crc kubenswrapper[4798]: I1011 04:31:27.139382 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 04:31:27 crc kubenswrapper[4798]: I1011 04:31:27.139483 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" gracePeriod=600 Oct 11 04:31:27 crc kubenswrapper[4798]: E1011 04:31:27.262208 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:31:27 crc kubenswrapper[4798]: I1011 04:31:27.901605 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" 
containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" exitCode=0 Oct 11 04:31:27 crc kubenswrapper[4798]: I1011 04:31:27.901667 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c"} Oct 11 04:31:27 crc kubenswrapper[4798]: I1011 04:31:27.902210 4798 scope.go:117] "RemoveContainer" containerID="79815571603bad34e91dc55f6905e3697ce42c89636d6c2b96147d668afa53e5" Oct 11 04:31:27 crc kubenswrapper[4798]: I1011 04:31:27.902922 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:31:27 crc kubenswrapper[4798]: E1011 04:31:27.903377 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:31:27 crc kubenswrapper[4798]: I1011 04:31:27.906942 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" event={"ID":"c8977d13-c86f-494d-a02e-b617e5e27fdb","Type":"ContainerStarted","Data":"99cc644bfa438482f17d25452158665aaa00f45f01a8fd8e3f397ea96a30d36c"} Oct 11 04:31:27 crc kubenswrapper[4798]: I1011 04:31:27.946645 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" podStartSLOduration=1.5042042869999999 podStartE2EDuration="1.946629621s" podCreationTimestamp="2025-10-11 04:31:26 +0000 UTC" firstStartedPulling="2025-10-11 04:31:26.862999385 +0000 UTC m=+2182.199289071" lastFinishedPulling="2025-10-11 04:31:27.305424699 +0000 UTC m=+2182.641714405" observedRunningTime="2025-10-11 04:31:27.942087713 +0000 UTC m=+2183.278377439" watchObservedRunningTime="2025-10-11 04:31:27.946629621 +0000 UTC m=+2183.282919307" Oct 11 04:31:42 crc kubenswrapper[4798]: I1011 04:31:42.424212 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:31:42 crc kubenswrapper[4798]: E1011 04:31:42.425021 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:31:57 crc kubenswrapper[4798]: I1011 04:31:57.424895 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:31:57 crc kubenswrapper[4798]: E1011 04:31:57.425934 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:32:04 crc kubenswrapper[4798]: I1011 04:32:04.245785 4798 generic.go:334] "Generic (PLEG): container finished" podID="c8977d13-c86f-494d-a02e-b617e5e27fdb" containerID="99cc644bfa438482f17d25452158665aaa00f45f01a8fd8e3f397ea96a30d36c" exitCode=0 Oct 11 04:32:04 crc kubenswrapper[4798]: I1011 04:32:04.245853 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" event={"ID":"c8977d13-c86f-494d-a02e-b617e5e27fdb","Type":"ContainerDied","Data":"99cc644bfa438482f17d25452158665aaa00f45f01a8fd8e3f397ea96a30d36c"} Oct 11 04:32:05 crc kubenswrapper[4798]: I1011 04:32:05.742223 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:32:05 crc kubenswrapper[4798]: I1011 04:32:05.849127 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-ssh-key\") pod \"c8977d13-c86f-494d-a02e-b617e5e27fdb\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " Oct 11 04:32:05 crc kubenswrapper[4798]: I1011 04:32:05.849196 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-inventory\") pod \"c8977d13-c86f-494d-a02e-b617e5e27fdb\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " Oct 11 04:32:05 crc kubenswrapper[4798]: I1011 04:32:05.849264 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qngv\" (UniqueName: \"kubernetes.io/projected/c8977d13-c86f-494d-a02e-b617e5e27fdb-kube-api-access-2qngv\") pod \"c8977d13-c86f-494d-a02e-b617e5e27fdb\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " Oct 11 04:32:05 crc kubenswrapper[4798]: I1011 04:32:05.849427 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-ceph\") pod \"c8977d13-c86f-494d-a02e-b617e5e27fdb\" (UID: \"c8977d13-c86f-494d-a02e-b617e5e27fdb\") " Oct 11 04:32:05 crc kubenswrapper[4798]: I1011 04:32:05.859844 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8977d13-c86f-494d-a02e-b617e5e27fdb-kube-api-access-2qngv" (OuterVolumeSpecName: "kube-api-access-2qngv") pod "c8977d13-c86f-494d-a02e-b617e5e27fdb" (UID: "c8977d13-c86f-494d-a02e-b617e5e27fdb"). InnerVolumeSpecName "kube-api-access-2qngv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:32:05 crc kubenswrapper[4798]: I1011 04:32:05.871408 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-ceph" (OuterVolumeSpecName: "ceph") pod "c8977d13-c86f-494d-a02e-b617e5e27fdb" (UID: "c8977d13-c86f-494d-a02e-b617e5e27fdb"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:32:05 crc kubenswrapper[4798]: I1011 04:32:05.924149 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-inventory" (OuterVolumeSpecName: "inventory") pod "c8977d13-c86f-494d-a02e-b617e5e27fdb" (UID: "c8977d13-c86f-494d-a02e-b617e5e27fdb"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:32:05 crc kubenswrapper[4798]: I1011 04:32:05.933643 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c8977d13-c86f-494d-a02e-b617e5e27fdb" (UID: "c8977d13-c86f-494d-a02e-b617e5e27fdb"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:32:05 crc kubenswrapper[4798]: I1011 04:32:05.951302 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:05 crc kubenswrapper[4798]: I1011 04:32:05.951342 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:05 crc kubenswrapper[4798]: I1011 04:32:05.951356 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qngv\" (UniqueName: \"kubernetes.io/projected/c8977d13-c86f-494d-a02e-b617e5e27fdb-kube-api-access-2qngv\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:05 crc kubenswrapper[4798]: I1011 04:32:05.951372 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c8977d13-c86f-494d-a02e-b617e5e27fdb-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.288490 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" event={"ID":"c8977d13-c86f-494d-a02e-b617e5e27fdb","Type":"ContainerDied","Data":"9901d81d2d669c71cf03826ed69e03e45f2f6744e5394084899ad38a4ce4f015"} Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.288536 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9901d81d2d669c71cf03826ed69e03e45f2f6744e5394084899ad38a4ce4f015" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.288615 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4fcfm" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.381246 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9"] Oct 11 04:32:06 crc kubenswrapper[4798]: E1011 04:32:06.382782 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8977d13-c86f-494d-a02e-b617e5e27fdb" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.382813 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8977d13-c86f-494d-a02e-b617e5e27fdb" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.383043 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8977d13-c86f-494d-a02e-b617e5e27fdb" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.383816 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.386644 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.387029 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.387298 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.387558 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.391242 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.402618 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9"] Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.562412 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.562569 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.562635 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.562761 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm4ql\" (UniqueName: \"kubernetes.io/projected/29216d22-5411-4bbd-b2c3-f643df3218c0-kube-api-access-xm4ql\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.664195 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.664758 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.664968 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.665186 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xm4ql\" (UniqueName: \"kubernetes.io/projected/29216d22-5411-4bbd-b2c3-f643df3218c0-kube-api-access-xm4ql\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.673139 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.674255 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.686563 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.687688 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm4ql\" (UniqueName: \"kubernetes.io/projected/29216d22-5411-4bbd-b2c3-f643df3218c0-kube-api-access-xm4ql\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:06 crc kubenswrapper[4798]: I1011 04:32:06.723328 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:07 crc kubenswrapper[4798]: I1011 04:32:07.101689 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9"] Oct 11 04:32:07 crc kubenswrapper[4798]: W1011 04:32:07.110585 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29216d22_5411_4bbd_b2c3_f643df3218c0.slice/crio-cb4d2d0c5c40768e9d8eb73f841209ff702667a9cc9ac584e09f5ccfa8b804bd WatchSource:0}: Error finding container cb4d2d0c5c40768e9d8eb73f841209ff702667a9cc9ac584e09f5ccfa8b804bd: Status 404 returned error can't find the container with id cb4d2d0c5c40768e9d8eb73f841209ff702667a9cc9ac584e09f5ccfa8b804bd Oct 11 04:32:07 crc kubenswrapper[4798]: I1011 04:32:07.295892 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" event={"ID":"29216d22-5411-4bbd-b2c3-f643df3218c0","Type":"ContainerStarted","Data":"cb4d2d0c5c40768e9d8eb73f841209ff702667a9cc9ac584e09f5ccfa8b804bd"} Oct 11 04:32:08 crc kubenswrapper[4798]: I1011 04:32:08.310974 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" event={"ID":"29216d22-5411-4bbd-b2c3-f643df3218c0","Type":"ContainerStarted","Data":"ccc47952d542943417a78058f4cd82c98b46b860ab2738e39dcdfaf62cb91eeb"} Oct 11 04:32:08 crc kubenswrapper[4798]: I1011 04:32:08.424677 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:32:08 crc kubenswrapper[4798]: E1011 04:32:08.425004 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:32:12 crc kubenswrapper[4798]: I1011 04:32:12.351365 4798 generic.go:334] "Generic (PLEG): container finished" podID="29216d22-5411-4bbd-b2c3-f643df3218c0" containerID="ccc47952d542943417a78058f4cd82c98b46b860ab2738e39dcdfaf62cb91eeb" exitCode=0 Oct 11 04:32:12 crc kubenswrapper[4798]: I1011 04:32:12.351498 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" event={"ID":"29216d22-5411-4bbd-b2c3-f643df3218c0","Type":"ContainerDied","Data":"ccc47952d542943417a78058f4cd82c98b46b860ab2738e39dcdfaf62cb91eeb"} Oct 11 04:32:13 crc kubenswrapper[4798]: I1011 04:32:13.768192 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-c4ltn"] Oct 11 04:32:13 crc kubenswrapper[4798]: I1011 04:32:13.774096 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:13 crc kubenswrapper[4798]: I1011 04:32:13.780442 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c4ltn"] Oct 11 04:32:13 crc kubenswrapper[4798]: I1011 04:32:13.882729 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:13 crc kubenswrapper[4798]: I1011 04:32:13.914002 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce103d1b-59c7-46ee-8f9a-917ad3d26822-catalog-content\") pod \"community-operators-c4ltn\" (UID: \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\") " pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:13 crc kubenswrapper[4798]: I1011 04:32:13.914087 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfww7\" (UniqueName: \"kubernetes.io/projected/ce103d1b-59c7-46ee-8f9a-917ad3d26822-kube-api-access-bfww7\") pod \"community-operators-c4ltn\" (UID: \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\") " pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:13 crc kubenswrapper[4798]: I1011 04:32:13.914205 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce103d1b-59c7-46ee-8f9a-917ad3d26822-utilities\") pod \"community-operators-c4ltn\" (UID: \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\") " pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.015906 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-ceph\") pod \"29216d22-5411-4bbd-b2c3-f643df3218c0\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.016017 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xm4ql\" (UniqueName: \"kubernetes.io/projected/29216d22-5411-4bbd-b2c3-f643df3218c0-kube-api-access-xm4ql\") pod \"29216d22-5411-4bbd-b2c3-f643df3218c0\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.016105 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-inventory\") pod \"29216d22-5411-4bbd-b2c3-f643df3218c0\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.016138 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-ssh-key\") pod \"29216d22-5411-4bbd-b2c3-f643df3218c0\" (UID: \"29216d22-5411-4bbd-b2c3-f643df3218c0\") " Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.016453 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce103d1b-59c7-46ee-8f9a-917ad3d26822-utilities\") pod \"community-operators-c4ltn\" (UID: \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\") " pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.016546 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce103d1b-59c7-46ee-8f9a-917ad3d26822-catalog-content\") pod \"community-operators-c4ltn\" (UID: \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\") " pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 
04:32:14.016607 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfww7\" (UniqueName: \"kubernetes.io/projected/ce103d1b-59c7-46ee-8f9a-917ad3d26822-kube-api-access-bfww7\") pod \"community-operators-c4ltn\" (UID: \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\") " pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.017422 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce103d1b-59c7-46ee-8f9a-917ad3d26822-catalog-content\") pod \"community-operators-c4ltn\" (UID: \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\") " pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.018698 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce103d1b-59c7-46ee-8f9a-917ad3d26822-utilities\") pod \"community-operators-c4ltn\" (UID: \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\") " pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.022505 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29216d22-5411-4bbd-b2c3-f643df3218c0-kube-api-access-xm4ql" (OuterVolumeSpecName: "kube-api-access-xm4ql") pod "29216d22-5411-4bbd-b2c3-f643df3218c0" (UID: "29216d22-5411-4bbd-b2c3-f643df3218c0"). InnerVolumeSpecName "kube-api-access-xm4ql". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.022999 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-ceph" (OuterVolumeSpecName: "ceph") pod "29216d22-5411-4bbd-b2c3-f643df3218c0" (UID: "29216d22-5411-4bbd-b2c3-f643df3218c0"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.041994 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfww7\" (UniqueName: \"kubernetes.io/projected/ce103d1b-59c7-46ee-8f9a-917ad3d26822-kube-api-access-bfww7\") pod \"community-operators-c4ltn\" (UID: \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\") " pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.043195 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "29216d22-5411-4bbd-b2c3-f643df3218c0" (UID: "29216d22-5411-4bbd-b2c3-f643df3218c0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.071661 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-inventory" (OuterVolumeSpecName: "inventory") pod "29216d22-5411-4bbd-b2c3-f643df3218c0" (UID: "29216d22-5411-4bbd-b2c3-f643df3218c0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.105911 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.119075 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.119113 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xm4ql\" (UniqueName: \"kubernetes.io/projected/29216d22-5411-4bbd-b2c3-f643df3218c0-kube-api-access-xm4ql\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.119131 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.119143 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29216d22-5411-4bbd-b2c3-f643df3218c0-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.375351 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" event={"ID":"29216d22-5411-4bbd-b2c3-f643df3218c0","Type":"ContainerDied","Data":"cb4d2d0c5c40768e9d8eb73f841209ff702667a9cc9ac584e09f5ccfa8b804bd"} Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.375418 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb4d2d0c5c40768e9d8eb73f841209ff702667a9cc9ac584e09f5ccfa8b804bd" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.375568 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.458107 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc"] Oct 11 04:32:14 crc kubenswrapper[4798]: E1011 04:32:14.458535 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29216d22-5411-4bbd-b2c3-f643df3218c0" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.458553 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="29216d22-5411-4bbd-b2c3-f643df3218c0" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.458742 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="29216d22-5411-4bbd-b2c3-f643df3218c0" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.459317 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.465096 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.465285 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.466723 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.467041 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.467191 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.467342 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc"] Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.525804 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8vffc\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.526432 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8vffc\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.526574 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rbxx\" (UniqueName: \"kubernetes.io/projected/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-kube-api-access-8rbxx\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8vffc\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.526674 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8vffc\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.628354 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8vffc\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.628448 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" 
(UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8vffc\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.628597 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rbxx\" (UniqueName: \"kubernetes.io/projected/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-kube-api-access-8rbxx\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8vffc\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.630189 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8vffc\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.634229 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8vffc\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.636161 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8vffc\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.637093 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8vffc\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.649172 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rbxx\" (UniqueName: \"kubernetes.io/projected/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-kube-api-access-8rbxx\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8vffc\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.650621 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c4ltn"] Oct 11 04:32:14 crc kubenswrapper[4798]: I1011 04:32:14.778276 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:32:15 crc kubenswrapper[4798]: I1011 04:32:15.247061 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc"] Oct 11 04:32:15 crc kubenswrapper[4798]: I1011 04:32:15.383514 4798 generic.go:334] "Generic (PLEG): container finished" podID="ce103d1b-59c7-46ee-8f9a-917ad3d26822" containerID="fe80ede5798d4f450b2de888e23aaf674d96c011706bf21bf6b17de2d27dec48" exitCode=0 Oct 11 04:32:15 crc kubenswrapper[4798]: I1011 04:32:15.383583 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4ltn" event={"ID":"ce103d1b-59c7-46ee-8f9a-917ad3d26822","Type":"ContainerDied","Data":"fe80ede5798d4f450b2de888e23aaf674d96c011706bf21bf6b17de2d27dec48"} Oct 11 04:32:15 crc kubenswrapper[4798]: I1011 04:32:15.383609 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4ltn" event={"ID":"ce103d1b-59c7-46ee-8f9a-917ad3d26822","Type":"ContainerStarted","Data":"1fc056e1a348d7b04b111fae71a52873dbd9372bf44a95f7d6dc2674544a07ad"} Oct 11 04:32:15 crc kubenswrapper[4798]: I1011 04:32:15.385227 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" event={"ID":"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682","Type":"ContainerStarted","Data":"b931bad11f275900258c36e91736020df51a96c373787ea00378a594c6b85cbe"} Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.149940 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6lrhb"] Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.152895 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.162596 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6lrhb"] Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.263265 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c33638ce-52bf-4f0b-980f-e7bf16143932-utilities\") pod \"redhat-marketplace-6lrhb\" (UID: \"c33638ce-52bf-4f0b-980f-e7bf16143932\") " pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.263482 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c33638ce-52bf-4f0b-980f-e7bf16143932-catalog-content\") pod \"redhat-marketplace-6lrhb\" (UID: \"c33638ce-52bf-4f0b-980f-e7bf16143932\") " pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.263543 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzgqm\" (UniqueName: \"kubernetes.io/projected/c33638ce-52bf-4f0b-980f-e7bf16143932-kube-api-access-mzgqm\") pod \"redhat-marketplace-6lrhb\" (UID: \"c33638ce-52bf-4f0b-980f-e7bf16143932\") " pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.364794 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c33638ce-52bf-4f0b-980f-e7bf16143932-catalog-content\") pod \"redhat-marketplace-6lrhb\" (UID: \"c33638ce-52bf-4f0b-980f-e7bf16143932\") " pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.365044 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzgqm\" (UniqueName: \"kubernetes.io/projected/c33638ce-52bf-4f0b-980f-e7bf16143932-kube-api-access-mzgqm\") pod \"redhat-marketplace-6lrhb\" (UID: \"c33638ce-52bf-4f0b-980f-e7bf16143932\") " pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.365258 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c33638ce-52bf-4f0b-980f-e7bf16143932-utilities\") pod \"redhat-marketplace-6lrhb\" (UID: \"c33638ce-52bf-4f0b-980f-e7bf16143932\") " pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.365785 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c33638ce-52bf-4f0b-980f-e7bf16143932-utilities\") pod \"redhat-marketplace-6lrhb\" (UID: \"c33638ce-52bf-4f0b-980f-e7bf16143932\") " pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.366664 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c33638ce-52bf-4f0b-980f-e7bf16143932-catalog-content\") pod \"redhat-marketplace-6lrhb\" (UID: \"c33638ce-52bf-4f0b-980f-e7bf16143932\") " pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.399050 4798 generic.go:334] "Generic (PLEG): container 
finished" podID="ce103d1b-59c7-46ee-8f9a-917ad3d26822" containerID="18093c63d6a292e94895a6fc882ff3ea9cf613e987fe4d0a724e90ed881dc282" exitCode=0 Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.399326 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4ltn" event={"ID":"ce103d1b-59c7-46ee-8f9a-917ad3d26822","Type":"ContainerDied","Data":"18093c63d6a292e94895a6fc882ff3ea9cf613e987fe4d0a724e90ed881dc282"} Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.402051 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzgqm\" (UniqueName: \"kubernetes.io/projected/c33638ce-52bf-4f0b-980f-e7bf16143932-kube-api-access-mzgqm\") pod \"redhat-marketplace-6lrhb\" (UID: \"c33638ce-52bf-4f0b-980f-e7bf16143932\") " pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.402083 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" event={"ID":"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682","Type":"ContainerStarted","Data":"e1632a2c5b8c2f5ef9cbf095cbce3c367325066d53ae0cb00f7334029c67d5ab"} Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.451590 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" podStartSLOduration=2.032270961 podStartE2EDuration="2.451542812s" podCreationTimestamp="2025-10-11 04:32:14 +0000 UTC" firstStartedPulling="2025-10-11 04:32:15.256586596 +0000 UTC m=+2230.592876282" lastFinishedPulling="2025-10-11 04:32:15.675858447 +0000 UTC m=+2231.012148133" observedRunningTime="2025-10-11 04:32:16.44087888 +0000 UTC m=+2231.777168576" watchObservedRunningTime="2025-10-11 04:32:16.451542812 +0000 UTC m=+2231.787832508" Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.478150 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:16 crc kubenswrapper[4798]: I1011 04:32:16.945282 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6lrhb"] Oct 11 04:32:16 crc kubenswrapper[4798]: W1011 04:32:16.953883 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc33638ce_52bf_4f0b_980f_e7bf16143932.slice/crio-ec59ebc9e9a2f1f5111d43505f82251341609b0020709b99eaa05860e20751a3 WatchSource:0}: Error finding container ec59ebc9e9a2f1f5111d43505f82251341609b0020709b99eaa05860e20751a3: Status 404 returned error can't find the container with id ec59ebc9e9a2f1f5111d43505f82251341609b0020709b99eaa05860e20751a3 Oct 11 04:32:17 crc kubenswrapper[4798]: I1011 04:32:17.411834 4798 generic.go:334] "Generic (PLEG): container finished" podID="c33638ce-52bf-4f0b-980f-e7bf16143932" containerID="da71647053967b4711f86eb3b349b38131834416c05c7b151ce6333f0dfe56a4" exitCode=0 Oct 11 04:32:17 crc kubenswrapper[4798]: I1011 04:32:17.412469 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6lrhb" event={"ID":"c33638ce-52bf-4f0b-980f-e7bf16143932","Type":"ContainerDied","Data":"da71647053967b4711f86eb3b349b38131834416c05c7b151ce6333f0dfe56a4"} Oct 11 04:32:17 crc kubenswrapper[4798]: I1011 04:32:17.412511 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6lrhb" event={"ID":"c33638ce-52bf-4f0b-980f-e7bf16143932","Type":"ContainerStarted","Data":"ec59ebc9e9a2f1f5111d43505f82251341609b0020709b99eaa05860e20751a3"} Oct 11 04:32:17 crc kubenswrapper[4798]: I1011 04:32:17.418791 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4ltn" event={"ID":"ce103d1b-59c7-46ee-8f9a-917ad3d26822","Type":"ContainerStarted","Data":"9e5e420bf7ce26376fc5dcc71921c276ab2afc7e8a1565c9c2f01619e08359d4"} Oct 11 04:32:17 crc kubenswrapper[4798]: I1011 04:32:17.458363 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-c4ltn" podStartSLOduration=2.7377248119999997 podStartE2EDuration="4.458344639s" podCreationTimestamp="2025-10-11 04:32:13 +0000 UTC" firstStartedPulling="2025-10-11 04:32:15.385360905 +0000 UTC m=+2230.721650591" lastFinishedPulling="2025-10-11 04:32:17.105980732 +0000 UTC m=+2232.442270418" observedRunningTime="2025-10-11 04:32:17.449292462 +0000 UTC m=+2232.785582168" watchObservedRunningTime="2025-10-11 04:32:17.458344639 +0000 UTC m=+2232.794634325" Oct 11 04:32:18 crc kubenswrapper[4798]: I1011 04:32:18.427497 4798 generic.go:334] "Generic (PLEG): container finished" podID="c33638ce-52bf-4f0b-980f-e7bf16143932" containerID="076ee417f0645a1df708ddd04781f56503fb9bd8ba36a502be99ec0c10635eb4" exitCode=0 Oct 11 04:32:18 crc kubenswrapper[4798]: I1011 04:32:18.427559 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6lrhb" event={"ID":"c33638ce-52bf-4f0b-980f-e7bf16143932","Type":"ContainerDied","Data":"076ee417f0645a1df708ddd04781f56503fb9bd8ba36a502be99ec0c10635eb4"} Oct 11 04:32:19 crc kubenswrapper[4798]: I1011 04:32:19.435866 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6lrhb" event={"ID":"c33638ce-52bf-4f0b-980f-e7bf16143932","Type":"ContainerStarted","Data":"a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2"} 
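[editor's note] Every "SyncLoop (PLEG): event for pod" line in this log serializes one pod lifecycle event with the fields ID, Type, and Data. An illustrative stand-in for that event shape, populated with the values from the entry just above; the real type lives in the kubelet's pleg package, and this struct is a simplification, not that type:

```go
package main

import "fmt"

// PodLifecycleEvent mirrors the JSON keys printed in the log (ID, Type,
// Data). ID is the pod UID; Data carries a container or sandbox ID.
type PodLifecycleEvent struct {
	ID   string
	Type string // e.g. "ContainerStarted", "ContainerDied"
	Data any
}

func main() {
	// Values copied from the redhat-marketplace-6lrhb entry above.
	e := PodLifecycleEvent{
		ID:   "c33638ce-52bf-4f0b-980f-e7bf16143932",
		Type: "ContainerStarted",
		Data: "a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2",
	}
	// The relisting PLEG compares consecutive runtime snapshots and emits
	// one such event per observed container state change.
	fmt.Printf("%+v\n", e)
}
```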
Oct 11 04:32:19 crc kubenswrapper[4798]: I1011 04:32:19.459913 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6lrhb" podStartSLOduration=2.036166165 podStartE2EDuration="3.459897041s" podCreationTimestamp="2025-10-11 04:32:16 +0000 UTC" firstStartedPulling="2025-10-11 04:32:17.414676121 +0000 UTC m=+2232.750965807" lastFinishedPulling="2025-10-11 04:32:18.838406997 +0000 UTC m=+2234.174696683" observedRunningTime="2025-10-11 04:32:19.454383032 +0000 UTC m=+2234.790672718" watchObservedRunningTime="2025-10-11 04:32:19.459897041 +0000 UTC m=+2234.796186727" Oct 11 04:32:21 crc kubenswrapper[4798]: I1011 04:32:21.423995 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:32:21 crc kubenswrapper[4798]: E1011 04:32:21.424653 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:32:24 crc kubenswrapper[4798]: I1011 04:32:24.106203 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:24 crc kubenswrapper[4798]: I1011 04:32:24.106688 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:24 crc kubenswrapper[4798]: I1011 04:32:24.165717 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:24 crc kubenswrapper[4798]: I1011 04:32:24.537480 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:24 crc kubenswrapper[4798]: I1011 04:32:24.602122 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c4ltn"] Oct 11 04:32:26 crc kubenswrapper[4798]: I1011 04:32:26.478308 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:26 crc kubenswrapper[4798]: I1011 04:32:26.479575 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:26 crc kubenswrapper[4798]: I1011 04:32:26.512187 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-c4ltn" podUID="ce103d1b-59c7-46ee-8f9a-917ad3d26822" containerName="registry-server" containerID="cri-o://9e5e420bf7ce26376fc5dcc71921c276ab2afc7e8a1565c9c2f01619e08359d4" gracePeriod=2 Oct 11 04:32:26 crc kubenswrapper[4798]: I1011 04:32:26.542809 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:26 crc kubenswrapper[4798]: I1011 04:32:26.621668 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.001809 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.110592 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfww7\" (UniqueName: \"kubernetes.io/projected/ce103d1b-59c7-46ee-8f9a-917ad3d26822-kube-api-access-bfww7\") pod \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\" (UID: \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\") " Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.110649 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce103d1b-59c7-46ee-8f9a-917ad3d26822-utilities\") pod \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\" (UID: \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\") " Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.110701 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce103d1b-59c7-46ee-8f9a-917ad3d26822-catalog-content\") pod \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\" (UID: \"ce103d1b-59c7-46ee-8f9a-917ad3d26822\") " Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.111478 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce103d1b-59c7-46ee-8f9a-917ad3d26822-utilities" (OuterVolumeSpecName: "utilities") pod "ce103d1b-59c7-46ee-8f9a-917ad3d26822" (UID: "ce103d1b-59c7-46ee-8f9a-917ad3d26822"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.116557 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce103d1b-59c7-46ee-8f9a-917ad3d26822-kube-api-access-bfww7" (OuterVolumeSpecName: "kube-api-access-bfww7") pod "ce103d1b-59c7-46ee-8f9a-917ad3d26822" (UID: "ce103d1b-59c7-46ee-8f9a-917ad3d26822"). InnerVolumeSpecName "kube-api-access-bfww7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.164431 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce103d1b-59c7-46ee-8f9a-917ad3d26822-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ce103d1b-59c7-46ee-8f9a-917ad3d26822" (UID: "ce103d1b-59c7-46ee-8f9a-917ad3d26822"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.213498 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfww7\" (UniqueName: \"kubernetes.io/projected/ce103d1b-59c7-46ee-8f9a-917ad3d26822-kube-api-access-bfww7\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.213558 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce103d1b-59c7-46ee-8f9a-917ad3d26822-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.213580 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce103d1b-59c7-46ee-8f9a-917ad3d26822-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.523966 4798 generic.go:334] "Generic (PLEG): container finished" podID="ce103d1b-59c7-46ee-8f9a-917ad3d26822" containerID="9e5e420bf7ce26376fc5dcc71921c276ab2afc7e8a1565c9c2f01619e08359d4" exitCode=0 Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.524100 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c4ltn" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.524121 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4ltn" event={"ID":"ce103d1b-59c7-46ee-8f9a-917ad3d26822","Type":"ContainerDied","Data":"9e5e420bf7ce26376fc5dcc71921c276ab2afc7e8a1565c9c2f01619e08359d4"} Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.524525 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4ltn" event={"ID":"ce103d1b-59c7-46ee-8f9a-917ad3d26822","Type":"ContainerDied","Data":"1fc056e1a348d7b04b111fae71a52873dbd9372bf44a95f7d6dc2674544a07ad"} Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.524563 4798 scope.go:117] "RemoveContainer" containerID="9e5e420bf7ce26376fc5dcc71921c276ab2afc7e8a1565c9c2f01619e08359d4" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.549312 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c4ltn"] Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.557577 4798 scope.go:117] "RemoveContainer" containerID="18093c63d6a292e94895a6fc882ff3ea9cf613e987fe4d0a724e90ed881dc282" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.557974 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-c4ltn"] Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.581017 4798 scope.go:117] "RemoveContainer" containerID="fe80ede5798d4f450b2de888e23aaf674d96c011706bf21bf6b17de2d27dec48" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.615938 4798 scope.go:117] "RemoveContainer" containerID="9e5e420bf7ce26376fc5dcc71921c276ab2afc7e8a1565c9c2f01619e08359d4" Oct 11 04:32:27 crc kubenswrapper[4798]: E1011 04:32:27.616553 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e5e420bf7ce26376fc5dcc71921c276ab2afc7e8a1565c9c2f01619e08359d4\": container with ID starting with 9e5e420bf7ce26376fc5dcc71921c276ab2afc7e8a1565c9c2f01619e08359d4 not found: ID does not exist" containerID="9e5e420bf7ce26376fc5dcc71921c276ab2afc7e8a1565c9c2f01619e08359d4" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.616595 
4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e5e420bf7ce26376fc5dcc71921c276ab2afc7e8a1565c9c2f01619e08359d4"} err="failed to get container status \"9e5e420bf7ce26376fc5dcc71921c276ab2afc7e8a1565c9c2f01619e08359d4\": rpc error: code = NotFound desc = could not find container \"9e5e420bf7ce26376fc5dcc71921c276ab2afc7e8a1565c9c2f01619e08359d4\": container with ID starting with 9e5e420bf7ce26376fc5dcc71921c276ab2afc7e8a1565c9c2f01619e08359d4 not found: ID does not exist" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.616620 4798 scope.go:117] "RemoveContainer" containerID="18093c63d6a292e94895a6fc882ff3ea9cf613e987fe4d0a724e90ed881dc282" Oct 11 04:32:27 crc kubenswrapper[4798]: E1011 04:32:27.616879 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18093c63d6a292e94895a6fc882ff3ea9cf613e987fe4d0a724e90ed881dc282\": container with ID starting with 18093c63d6a292e94895a6fc882ff3ea9cf613e987fe4d0a724e90ed881dc282 not found: ID does not exist" containerID="18093c63d6a292e94895a6fc882ff3ea9cf613e987fe4d0a724e90ed881dc282" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.616908 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18093c63d6a292e94895a6fc882ff3ea9cf613e987fe4d0a724e90ed881dc282"} err="failed to get container status \"18093c63d6a292e94895a6fc882ff3ea9cf613e987fe4d0a724e90ed881dc282\": rpc error: code = NotFound desc = could not find container \"18093c63d6a292e94895a6fc882ff3ea9cf613e987fe4d0a724e90ed881dc282\": container with ID starting with 18093c63d6a292e94895a6fc882ff3ea9cf613e987fe4d0a724e90ed881dc282 not found: ID does not exist" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.616927 4798 scope.go:117] "RemoveContainer" containerID="fe80ede5798d4f450b2de888e23aaf674d96c011706bf21bf6b17de2d27dec48" Oct 11 04:32:27 crc kubenswrapper[4798]: E1011 04:32:27.617114 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe80ede5798d4f450b2de888e23aaf674d96c011706bf21bf6b17de2d27dec48\": container with ID starting with fe80ede5798d4f450b2de888e23aaf674d96c011706bf21bf6b17de2d27dec48 not found: ID does not exist" containerID="fe80ede5798d4f450b2de888e23aaf674d96c011706bf21bf6b17de2d27dec48" Oct 11 04:32:27 crc kubenswrapper[4798]: I1011 04:32:27.617150 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe80ede5798d4f450b2de888e23aaf674d96c011706bf21bf6b17de2d27dec48"} err="failed to get container status \"fe80ede5798d4f450b2de888e23aaf674d96c011706bf21bf6b17de2d27dec48\": rpc error: code = NotFound desc = could not find container \"fe80ede5798d4f450b2de888e23aaf674d96c011706bf21bf6b17de2d27dec48\": container with ID starting with fe80ede5798d4f450b2de888e23aaf674d96c011706bf21bf6b17de2d27dec48 not found: ID does not exist" Oct 11 04:32:28 crc kubenswrapper[4798]: I1011 04:32:28.422543 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6lrhb"] Oct 11 04:32:28 crc kubenswrapper[4798]: I1011 04:32:28.547193 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6lrhb" podUID="c33638ce-52bf-4f0b-980f-e7bf16143932" containerName="registry-server" containerID="cri-o://a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2" gracePeriod=2 Oct 11 
04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.017277 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.156055 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c33638ce-52bf-4f0b-980f-e7bf16143932-catalog-content\") pod \"c33638ce-52bf-4f0b-980f-e7bf16143932\" (UID: \"c33638ce-52bf-4f0b-980f-e7bf16143932\") " Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.156268 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c33638ce-52bf-4f0b-980f-e7bf16143932-utilities\") pod \"c33638ce-52bf-4f0b-980f-e7bf16143932\" (UID: \"c33638ce-52bf-4f0b-980f-e7bf16143932\") " Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.156310 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzgqm\" (UniqueName: \"kubernetes.io/projected/c33638ce-52bf-4f0b-980f-e7bf16143932-kube-api-access-mzgqm\") pod \"c33638ce-52bf-4f0b-980f-e7bf16143932\" (UID: \"c33638ce-52bf-4f0b-980f-e7bf16143932\") " Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.158729 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c33638ce-52bf-4f0b-980f-e7bf16143932-utilities" (OuterVolumeSpecName: "utilities") pod "c33638ce-52bf-4f0b-980f-e7bf16143932" (UID: "c33638ce-52bf-4f0b-980f-e7bf16143932"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.163144 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c33638ce-52bf-4f0b-980f-e7bf16143932-kube-api-access-mzgqm" (OuterVolumeSpecName: "kube-api-access-mzgqm") pod "c33638ce-52bf-4f0b-980f-e7bf16143932" (UID: "c33638ce-52bf-4f0b-980f-e7bf16143932"). InnerVolumeSpecName "kube-api-access-mzgqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.170592 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c33638ce-52bf-4f0b-980f-e7bf16143932-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c33638ce-52bf-4f0b-980f-e7bf16143932" (UID: "c33638ce-52bf-4f0b-980f-e7bf16143932"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.257896 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c33638ce-52bf-4f0b-980f-e7bf16143932-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.257930 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzgqm\" (UniqueName: \"kubernetes.io/projected/c33638ce-52bf-4f0b-980f-e7bf16143932-kube-api-access-mzgqm\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.257943 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c33638ce-52bf-4f0b-980f-e7bf16143932-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.435714 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce103d1b-59c7-46ee-8f9a-917ad3d26822" path="/var/lib/kubelet/pods/ce103d1b-59c7-46ee-8f9a-917ad3d26822/volumes" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.555875 4798 generic.go:334] "Generic (PLEG): container finished" podID="c33638ce-52bf-4f0b-980f-e7bf16143932" containerID="a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2" exitCode=0 Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.555933 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6lrhb" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.555952 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6lrhb" event={"ID":"c33638ce-52bf-4f0b-980f-e7bf16143932","Type":"ContainerDied","Data":"a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2"} Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.557221 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6lrhb" event={"ID":"c33638ce-52bf-4f0b-980f-e7bf16143932","Type":"ContainerDied","Data":"ec59ebc9e9a2f1f5111d43505f82251341609b0020709b99eaa05860e20751a3"} Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.557243 4798 scope.go:117] "RemoveContainer" containerID="a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.580484 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6lrhb"] Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.582923 4798 scope.go:117] "RemoveContainer" containerID="076ee417f0645a1df708ddd04781f56503fb9bd8ba36a502be99ec0c10635eb4" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.586936 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6lrhb"] Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.598729 4798 scope.go:117] "RemoveContainer" containerID="da71647053967b4711f86eb3b349b38131834416c05c7b151ce6333f0dfe56a4" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.640621 4798 scope.go:117] "RemoveContainer" containerID="a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2" Oct 11 04:32:29 crc kubenswrapper[4798]: E1011 04:32:29.641176 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2\": container with ID 
starting with a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2 not found: ID does not exist" containerID="a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.641251 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2"} err="failed to get container status \"a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2\": rpc error: code = NotFound desc = could not find container \"a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2\": container with ID starting with a69c07295d4a46abce10c680abc58381934611bce83a53b1eaa4ee54ebbe64c2 not found: ID does not exist" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.641310 4798 scope.go:117] "RemoveContainer" containerID="076ee417f0645a1df708ddd04781f56503fb9bd8ba36a502be99ec0c10635eb4" Oct 11 04:32:29 crc kubenswrapper[4798]: E1011 04:32:29.641786 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"076ee417f0645a1df708ddd04781f56503fb9bd8ba36a502be99ec0c10635eb4\": container with ID starting with 076ee417f0645a1df708ddd04781f56503fb9bd8ba36a502be99ec0c10635eb4 not found: ID does not exist" containerID="076ee417f0645a1df708ddd04781f56503fb9bd8ba36a502be99ec0c10635eb4" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.641892 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"076ee417f0645a1df708ddd04781f56503fb9bd8ba36a502be99ec0c10635eb4"} err="failed to get container status \"076ee417f0645a1df708ddd04781f56503fb9bd8ba36a502be99ec0c10635eb4\": rpc error: code = NotFound desc = could not find container \"076ee417f0645a1df708ddd04781f56503fb9bd8ba36a502be99ec0c10635eb4\": container with ID starting with 076ee417f0645a1df708ddd04781f56503fb9bd8ba36a502be99ec0c10635eb4 not found: ID does not exist" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.641978 4798 scope.go:117] "RemoveContainer" containerID="da71647053967b4711f86eb3b349b38131834416c05c7b151ce6333f0dfe56a4" Oct 11 04:32:29 crc kubenswrapper[4798]: E1011 04:32:29.642586 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da71647053967b4711f86eb3b349b38131834416c05c7b151ce6333f0dfe56a4\": container with ID starting with da71647053967b4711f86eb3b349b38131834416c05c7b151ce6333f0dfe56a4 not found: ID does not exist" containerID="da71647053967b4711f86eb3b349b38131834416c05c7b151ce6333f0dfe56a4" Oct 11 04:32:29 crc kubenswrapper[4798]: I1011 04:32:29.642631 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da71647053967b4711f86eb3b349b38131834416c05c7b151ce6333f0dfe56a4"} err="failed to get container status \"da71647053967b4711f86eb3b349b38131834416c05c7b151ce6333f0dfe56a4\": rpc error: code = NotFound desc = could not find container \"da71647053967b4711f86eb3b349b38131834416c05c7b151ce6333f0dfe56a4\": container with ID starting with da71647053967b4711f86eb3b349b38131834416c05c7b151ce6333f0dfe56a4 not found: ID does not exist" Oct 11 04:32:31 crc kubenswrapper[4798]: I1011 04:32:31.442428 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c33638ce-52bf-4f0b-980f-e7bf16143932" path="/var/lib/kubelet/pods/c33638ce-52bf-4f0b-980f-e7bf16143932/volumes" Oct 11 04:32:32 crc kubenswrapper[4798]: I1011 
04:32:32.423421 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:32:32 crc kubenswrapper[4798]: E1011 04:32:32.424104 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:32:47 crc kubenswrapper[4798]: I1011 04:32:47.423437 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:32:47 crc kubenswrapper[4798]: E1011 04:32:47.424155 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:33:00 crc kubenswrapper[4798]: I1011 04:33:00.423850 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:33:00 crc kubenswrapper[4798]: E1011 04:33:00.425030 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:33:02 crc kubenswrapper[4798]: I1011 04:33:02.841800 4798 generic.go:334] "Generic (PLEG): container finished" podID="bcf6a25c-81f9-4a1f-bf5e-9c15b703c682" containerID="e1632a2c5b8c2f5ef9cbf095cbce3c367325066d53ae0cb00f7334029c67d5ab" exitCode=0 Oct 11 04:33:02 crc kubenswrapper[4798]: I1011 04:33:02.841906 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" event={"ID":"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682","Type":"ContainerDied","Data":"e1632a2c5b8c2f5ef9cbf095cbce3c367325066d53ae0cb00f7334029c67d5ab"} Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.311783 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.347362 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rbxx\" (UniqueName: \"kubernetes.io/projected/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-kube-api-access-8rbxx\") pod \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.347470 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-ssh-key\") pod \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.347534 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-inventory\") pod \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.347717 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-ceph\") pod \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\" (UID: \"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682\") " Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.356568 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-ceph" (OuterVolumeSpecName: "ceph") pod "bcf6a25c-81f9-4a1f-bf5e-9c15b703c682" (UID: "bcf6a25c-81f9-4a1f-bf5e-9c15b703c682"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.357964 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-kube-api-access-8rbxx" (OuterVolumeSpecName: "kube-api-access-8rbxx") pod "bcf6a25c-81f9-4a1f-bf5e-9c15b703c682" (UID: "bcf6a25c-81f9-4a1f-bf5e-9c15b703c682"). InnerVolumeSpecName "kube-api-access-8rbxx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.385227 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-inventory" (OuterVolumeSpecName: "inventory") pod "bcf6a25c-81f9-4a1f-bf5e-9c15b703c682" (UID: "bcf6a25c-81f9-4a1f-bf5e-9c15b703c682"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.387755 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bcf6a25c-81f9-4a1f-bf5e-9c15b703c682" (UID: "bcf6a25c-81f9-4a1f-bf5e-9c15b703c682"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.449329 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.449367 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rbxx\" (UniqueName: \"kubernetes.io/projected/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-kube-api-access-8rbxx\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.449377 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.449385 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcf6a25c-81f9-4a1f-bf5e-9c15b703c682-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.866344 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" event={"ID":"bcf6a25c-81f9-4a1f-bf5e-9c15b703c682","Type":"ContainerDied","Data":"b931bad11f275900258c36e91736020df51a96c373787ea00378a594c6b85cbe"} Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.866446 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b931bad11f275900258c36e91736020df51a96c373787ea00378a594c6b85cbe" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.866545 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8vffc" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.965322 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-rznrc"] Oct 11 04:33:04 crc kubenswrapper[4798]: E1011 04:33:04.966084 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce103d1b-59c7-46ee-8f9a-917ad3d26822" containerName="registry-server" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.966119 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce103d1b-59c7-46ee-8f9a-917ad3d26822" containerName="registry-server" Oct 11 04:33:04 crc kubenswrapper[4798]: E1011 04:33:04.966158 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce103d1b-59c7-46ee-8f9a-917ad3d26822" containerName="extract-utilities" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.966174 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce103d1b-59c7-46ee-8f9a-917ad3d26822" containerName="extract-utilities" Oct 11 04:33:04 crc kubenswrapper[4798]: E1011 04:33:04.966188 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c33638ce-52bf-4f0b-980f-e7bf16143932" containerName="extract-content" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.966203 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c33638ce-52bf-4f0b-980f-e7bf16143932" containerName="extract-content" Oct 11 04:33:04 crc kubenswrapper[4798]: E1011 04:33:04.966234 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c33638ce-52bf-4f0b-980f-e7bf16143932" containerName="extract-utilities" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.966243 4798 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c33638ce-52bf-4f0b-980f-e7bf16143932" containerName="extract-utilities" Oct 11 04:33:04 crc kubenswrapper[4798]: E1011 04:33:04.966254 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce103d1b-59c7-46ee-8f9a-917ad3d26822" containerName="extract-content" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.966262 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce103d1b-59c7-46ee-8f9a-917ad3d26822" containerName="extract-content" Oct 11 04:33:04 crc kubenswrapper[4798]: E1011 04:33:04.966284 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcf6a25c-81f9-4a1f-bf5e-9c15b703c682" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.966294 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcf6a25c-81f9-4a1f-bf5e-9c15b703c682" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:33:04 crc kubenswrapper[4798]: E1011 04:33:04.966308 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c33638ce-52bf-4f0b-980f-e7bf16143932" containerName="registry-server" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.966316 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c33638ce-52bf-4f0b-980f-e7bf16143932" containerName="registry-server" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.966648 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce103d1b-59c7-46ee-8f9a-917ad3d26822" containerName="registry-server" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.966679 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="c33638ce-52bf-4f0b-980f-e7bf16143932" containerName="registry-server" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.966715 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcf6a25c-81f9-4a1f-bf5e-9c15b703c682" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.967655 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.970303 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.970615 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.970657 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.971045 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.971148 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:33:04 crc kubenswrapper[4798]: I1011 04:33:04.973449 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-rznrc"] Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.061926 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scsgv\" (UniqueName: \"kubernetes.io/projected/e75ed635-7172-447f-ab1b-b9af46d15be1-kube-api-access-scsgv\") pod \"ssh-known-hosts-edpm-deployment-rznrc\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.062072 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-rznrc\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.062161 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-rznrc\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.062219 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-ceph\") pod \"ssh-known-hosts-edpm-deployment-rznrc\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.164046 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scsgv\" (UniqueName: \"kubernetes.io/projected/e75ed635-7172-447f-ab1b-b9af46d15be1-kube-api-access-scsgv\") pod \"ssh-known-hosts-edpm-deployment-rznrc\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.164164 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-ssh-key-openstack-edpm-ipam\") pod 
\"ssh-known-hosts-edpm-deployment-rznrc\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.164192 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-rznrc\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.164226 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-ceph\") pod \"ssh-known-hosts-edpm-deployment-rznrc\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.171636 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-ceph\") pod \"ssh-known-hosts-edpm-deployment-rznrc\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.171804 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-rznrc\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.172150 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-rznrc\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.193812 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scsgv\" (UniqueName: \"kubernetes.io/projected/e75ed635-7172-447f-ab1b-b9af46d15be1-kube-api-access-scsgv\") pod \"ssh-known-hosts-edpm-deployment-rznrc\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.341061 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:05 crc kubenswrapper[4798]: I1011 04:33:05.929241 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-rznrc"] Oct 11 04:33:05 crc kubenswrapper[4798]: W1011 04:33:05.946504 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode75ed635_7172_447f_ab1b_b9af46d15be1.slice/crio-3e96aff4d83e02f4b605ec889fbc4b1d36e6c630a45306b32409d5182387c73d WatchSource:0}: Error finding container 3e96aff4d83e02f4b605ec889fbc4b1d36e6c630a45306b32409d5182387c73d: Status 404 returned error can't find the container with id 3e96aff4d83e02f4b605ec889fbc4b1d36e6c630a45306b32409d5182387c73d Oct 11 04:33:06 crc kubenswrapper[4798]: I1011 04:33:06.361981 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:33:06 crc kubenswrapper[4798]: I1011 04:33:06.896430 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" event={"ID":"e75ed635-7172-447f-ab1b-b9af46d15be1","Type":"ContainerStarted","Data":"fd5337cf5c947e869df48bcd262dfa6d4808c527fd64cb9087ee79dce4ce0bf3"} Oct 11 04:33:06 crc kubenswrapper[4798]: I1011 04:33:06.896477 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" event={"ID":"e75ed635-7172-447f-ab1b-b9af46d15be1","Type":"ContainerStarted","Data":"3e96aff4d83e02f4b605ec889fbc4b1d36e6c630a45306b32409d5182387c73d"} Oct 11 04:33:06 crc kubenswrapper[4798]: I1011 04:33:06.922795 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" podStartSLOduration=2.514658721 podStartE2EDuration="2.922775289s" podCreationTimestamp="2025-10-11 04:33:04 +0000 UTC" firstStartedPulling="2025-10-11 04:33:05.950263637 +0000 UTC m=+2281.286553323" lastFinishedPulling="2025-10-11 04:33:06.358380165 +0000 UTC m=+2281.694669891" observedRunningTime="2025-10-11 04:33:06.917262289 +0000 UTC m=+2282.253552015" watchObservedRunningTime="2025-10-11 04:33:06.922775289 +0000 UTC m=+2282.259064985" Oct 11 04:33:15 crc kubenswrapper[4798]: I1011 04:33:15.437114 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:33:15 crc kubenswrapper[4798]: E1011 04:33:15.440065 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:33:18 crc kubenswrapper[4798]: I1011 04:33:18.010742 4798 generic.go:334] "Generic (PLEG): container finished" podID="e75ed635-7172-447f-ab1b-b9af46d15be1" containerID="fd5337cf5c947e869df48bcd262dfa6d4808c527fd64cb9087ee79dce4ce0bf3" exitCode=0 Oct 11 04:33:18 crc kubenswrapper[4798]: I1011 04:33:18.010877 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" event={"ID":"e75ed635-7172-447f-ab1b-b9af46d15be1","Type":"ContainerDied","Data":"fd5337cf5c947e869df48bcd262dfa6d4808c527fd64cb9087ee79dce4ce0bf3"} Oct 11 04:33:19 crc kubenswrapper[4798]: I1011 
04:33:19.560747 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:19 crc kubenswrapper[4798]: I1011 04:33:19.692489 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-ceph\") pod \"e75ed635-7172-447f-ab1b-b9af46d15be1\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " Oct 11 04:33:19 crc kubenswrapper[4798]: I1011 04:33:19.692917 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-ssh-key-openstack-edpm-ipam\") pod \"e75ed635-7172-447f-ab1b-b9af46d15be1\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " Oct 11 04:33:19 crc kubenswrapper[4798]: I1011 04:33:19.693036 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-inventory-0\") pod \"e75ed635-7172-447f-ab1b-b9af46d15be1\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " Oct 11 04:33:19 crc kubenswrapper[4798]: I1011 04:33:19.693076 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scsgv\" (UniqueName: \"kubernetes.io/projected/e75ed635-7172-447f-ab1b-b9af46d15be1-kube-api-access-scsgv\") pod \"e75ed635-7172-447f-ab1b-b9af46d15be1\" (UID: \"e75ed635-7172-447f-ab1b-b9af46d15be1\") " Oct 11 04:33:19 crc kubenswrapper[4798]: I1011 04:33:19.702678 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e75ed635-7172-447f-ab1b-b9af46d15be1-kube-api-access-scsgv" (OuterVolumeSpecName: "kube-api-access-scsgv") pod "e75ed635-7172-447f-ab1b-b9af46d15be1" (UID: "e75ed635-7172-447f-ab1b-b9af46d15be1"). InnerVolumeSpecName "kube-api-access-scsgv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:33:19 crc kubenswrapper[4798]: I1011 04:33:19.705640 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-ceph" (OuterVolumeSpecName: "ceph") pod "e75ed635-7172-447f-ab1b-b9af46d15be1" (UID: "e75ed635-7172-447f-ab1b-b9af46d15be1"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:33:19 crc kubenswrapper[4798]: I1011 04:33:19.750046 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "e75ed635-7172-447f-ab1b-b9af46d15be1" (UID: "e75ed635-7172-447f-ab1b-b9af46d15be1"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:33:19 crc kubenswrapper[4798]: I1011 04:33:19.760094 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "e75ed635-7172-447f-ab1b-b9af46d15be1" (UID: "e75ed635-7172-447f-ab1b-b9af46d15be1"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:33:19 crc kubenswrapper[4798]: I1011 04:33:19.797516 4798 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-inventory-0\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:19 crc kubenswrapper[4798]: I1011 04:33:19.797571 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scsgv\" (UniqueName: \"kubernetes.io/projected/e75ed635-7172-447f-ab1b-b9af46d15be1-kube-api-access-scsgv\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:19 crc kubenswrapper[4798]: I1011 04:33:19.797593 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:19 crc kubenswrapper[4798]: I1011 04:33:19.797608 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e75ed635-7172-447f-ab1b-b9af46d15be1-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.043931 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" event={"ID":"e75ed635-7172-447f-ab1b-b9af46d15be1","Type":"ContainerDied","Data":"3e96aff4d83e02f4b605ec889fbc4b1d36e6c630a45306b32409d5182387c73d"} Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.043986 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e96aff4d83e02f4b605ec889fbc4b1d36e6c630a45306b32409d5182387c73d" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.044060 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-rznrc" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.181418 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs"] Oct 11 04:33:20 crc kubenswrapper[4798]: E1011 04:33:20.183276 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e75ed635-7172-447f-ab1b-b9af46d15be1" containerName="ssh-known-hosts-edpm-deployment" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.183344 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="e75ed635-7172-447f-ab1b-b9af46d15be1" containerName="ssh-known-hosts-edpm-deployment" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.184337 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="e75ed635-7172-447f-ab1b-b9af46d15be1" containerName="ssh-known-hosts-edpm-deployment" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.185967 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.188952 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.189848 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.189940 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.190107 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.190258 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.199010 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs"] Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.318083 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2nv2\" (UniqueName: \"kubernetes.io/projected/230b772a-a9d9-4318-ada4-a83901d636b5-kube-api-access-t2nv2\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s9wvs\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.318205 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s9wvs\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.318242 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s9wvs\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.318260 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s9wvs\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.419701 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s9wvs\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.419783 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s9wvs\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.419818 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s9wvs\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.419937 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2nv2\" (UniqueName: \"kubernetes.io/projected/230b772a-a9d9-4318-ada4-a83901d636b5-kube-api-access-t2nv2\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s9wvs\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.425233 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s9wvs\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.426809 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s9wvs\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.428449 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s9wvs\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.445442 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2nv2\" (UniqueName: \"kubernetes.io/projected/230b772a-a9d9-4318-ada4-a83901d636b5-kube-api-access-t2nv2\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s9wvs\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:20 crc kubenswrapper[4798]: I1011 04:33:20.533381 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:21 crc kubenswrapper[4798]: I1011 04:33:21.216873 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs"] Oct 11 04:33:22 crc kubenswrapper[4798]: I1011 04:33:22.070733 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" event={"ID":"230b772a-a9d9-4318-ada4-a83901d636b5","Type":"ContainerStarted","Data":"b7c083fbc09886a7cb76125e3c94109683f42de05d2731454534eb59cbdf5d11"} Oct 11 04:33:22 crc kubenswrapper[4798]: I1011 04:33:22.071549 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" event={"ID":"230b772a-a9d9-4318-ada4-a83901d636b5","Type":"ContainerStarted","Data":"5b2dc91043fe5f031a1ede01c68451706a98d0d3149818352fdf68a26fca709e"} Oct 11 04:33:22 crc kubenswrapper[4798]: I1011 04:33:22.097525 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" podStartSLOduration=1.613011326 podStartE2EDuration="2.097509674s" podCreationTimestamp="2025-10-11 04:33:20 +0000 UTC" firstStartedPulling="2025-10-11 04:33:21.220421275 +0000 UTC m=+2296.556710971" lastFinishedPulling="2025-10-11 04:33:21.704919623 +0000 UTC m=+2297.041209319" observedRunningTime="2025-10-11 04:33:22.09549376 +0000 UTC m=+2297.431783446" watchObservedRunningTime="2025-10-11 04:33:22.097509674 +0000 UTC m=+2297.433799350" Oct 11 04:33:30 crc kubenswrapper[4798]: I1011 04:33:30.164118 4798 generic.go:334] "Generic (PLEG): container finished" podID="230b772a-a9d9-4318-ada4-a83901d636b5" containerID="b7c083fbc09886a7cb76125e3c94109683f42de05d2731454534eb59cbdf5d11" exitCode=0 Oct 11 04:33:30 crc kubenswrapper[4798]: I1011 04:33:30.164226 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" event={"ID":"230b772a-a9d9-4318-ada4-a83901d636b5","Type":"ContainerDied","Data":"b7c083fbc09886a7cb76125e3c94109683f42de05d2731454534eb59cbdf5d11"} Oct 11 04:33:30 crc kubenswrapper[4798]: I1011 04:33:30.423854 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:33:30 crc kubenswrapper[4798]: E1011 04:33:30.424780 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:33:31 crc kubenswrapper[4798]: I1011 04:33:31.662889 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:31 crc kubenswrapper[4798]: I1011 04:33:31.725568 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-ssh-key\") pod \"230b772a-a9d9-4318-ada4-a83901d636b5\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " Oct 11 04:33:31 crc kubenswrapper[4798]: I1011 04:33:31.725635 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2nv2\" (UniqueName: \"kubernetes.io/projected/230b772a-a9d9-4318-ada4-a83901d636b5-kube-api-access-t2nv2\") pod \"230b772a-a9d9-4318-ada4-a83901d636b5\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " Oct 11 04:33:31 crc kubenswrapper[4798]: I1011 04:33:31.725695 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-ceph\") pod \"230b772a-a9d9-4318-ada4-a83901d636b5\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " Oct 11 04:33:31 crc kubenswrapper[4798]: I1011 04:33:31.725747 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-inventory\") pod \"230b772a-a9d9-4318-ada4-a83901d636b5\" (UID: \"230b772a-a9d9-4318-ada4-a83901d636b5\") " Oct 11 04:33:31 crc kubenswrapper[4798]: I1011 04:33:31.735209 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-ceph" (OuterVolumeSpecName: "ceph") pod "230b772a-a9d9-4318-ada4-a83901d636b5" (UID: "230b772a-a9d9-4318-ada4-a83901d636b5"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:33:31 crc kubenswrapper[4798]: I1011 04:33:31.735941 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/230b772a-a9d9-4318-ada4-a83901d636b5-kube-api-access-t2nv2" (OuterVolumeSpecName: "kube-api-access-t2nv2") pod "230b772a-a9d9-4318-ada4-a83901d636b5" (UID: "230b772a-a9d9-4318-ada4-a83901d636b5"). InnerVolumeSpecName "kube-api-access-t2nv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:33:31 crc kubenswrapper[4798]: I1011 04:33:31.766423 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "230b772a-a9d9-4318-ada4-a83901d636b5" (UID: "230b772a-a9d9-4318-ada4-a83901d636b5"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:33:31 crc kubenswrapper[4798]: I1011 04:33:31.767594 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-inventory" (OuterVolumeSpecName: "inventory") pod "230b772a-a9d9-4318-ada4-a83901d636b5" (UID: "230b772a-a9d9-4318-ada4-a83901d636b5"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:33:31 crc kubenswrapper[4798]: I1011 04:33:31.828794 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:31 crc kubenswrapper[4798]: I1011 04:33:31.828842 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2nv2\" (UniqueName: \"kubernetes.io/projected/230b772a-a9d9-4318-ada4-a83901d636b5-kube-api-access-t2nv2\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:31 crc kubenswrapper[4798]: I1011 04:33:31.828873 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:31 crc kubenswrapper[4798]: I1011 04:33:31.828884 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/230b772a-a9d9-4318-ada4-a83901d636b5-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.187370 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" event={"ID":"230b772a-a9d9-4318-ada4-a83901d636b5","Type":"ContainerDied","Data":"5b2dc91043fe5f031a1ede01c68451706a98d0d3149818352fdf68a26fca709e"} Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.188009 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b2dc91043fe5f031a1ede01c68451706a98d0d3149818352fdf68a26fca709e" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.187553 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s9wvs" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.305856 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4"] Oct 11 04:33:32 crc kubenswrapper[4798]: E1011 04:33:32.306526 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="230b772a-a9d9-4318-ada4-a83901d636b5" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.306546 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="230b772a-a9d9-4318-ada4-a83901d636b5" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.306824 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="230b772a-a9d9-4318-ada4-a83901d636b5" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.307822 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.313719 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.314155 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.314380 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.314963 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.314995 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.324131 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4"] Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.445187 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.445776 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc7xd\" (UniqueName: \"kubernetes.io/projected/c7e25630-2ac6-435f-bba0-281aaee8a48d-kube-api-access-nc7xd\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.445894 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.446144 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.548047 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc7xd\" (UniqueName: \"kubernetes.io/projected/c7e25630-2ac6-435f-bba0-281aaee8a48d-kube-api-access-nc7xd\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.548115 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.548150 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.548280 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.553625 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.555154 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.559678 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.572633 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nc7xd\" (UniqueName: \"kubernetes.io/projected/c7e25630-2ac6-435f-bba0-281aaee8a48d-kube-api-access-nc7xd\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:32 crc kubenswrapper[4798]: I1011 04:33:32.635165 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:33 crc kubenswrapper[4798]: I1011 04:33:33.245246 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4"] Oct 11 04:33:33 crc kubenswrapper[4798]: W1011 04:33:33.254382 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc7e25630_2ac6_435f_bba0_281aaee8a48d.slice/crio-254eb8a3dc0631bda7bfa39c2aaa7424b1d07c1282619895b1bbce932f9134cf WatchSource:0}: Error finding container 254eb8a3dc0631bda7bfa39c2aaa7424b1d07c1282619895b1bbce932f9134cf: Status 404 returned error can't find the container with id 254eb8a3dc0631bda7bfa39c2aaa7424b1d07c1282619895b1bbce932f9134cf Oct 11 04:33:34 crc kubenswrapper[4798]: I1011 04:33:34.213329 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" event={"ID":"c7e25630-2ac6-435f-bba0-281aaee8a48d","Type":"ContainerStarted","Data":"d6f021b3859628b81a52f419cbd1d1f94724c1f7bd7faa6c2bfeb06d2047ebb4"} Oct 11 04:33:34 crc kubenswrapper[4798]: I1011 04:33:34.213863 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" event={"ID":"c7e25630-2ac6-435f-bba0-281aaee8a48d","Type":"ContainerStarted","Data":"254eb8a3dc0631bda7bfa39c2aaa7424b1d07c1282619895b1bbce932f9134cf"} Oct 11 04:33:34 crc kubenswrapper[4798]: I1011 04:33:34.299736 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" podStartSLOduration=1.782922769 podStartE2EDuration="2.299714119s" podCreationTimestamp="2025-10-11 04:33:32 +0000 UTC" firstStartedPulling="2025-10-11 04:33:33.258026883 +0000 UTC m=+2308.594316579" lastFinishedPulling="2025-10-11 04:33:33.774818203 +0000 UTC m=+2309.111107929" observedRunningTime="2025-10-11 04:33:34.285166843 +0000 UTC m=+2309.621456569" watchObservedRunningTime="2025-10-11 04:33:34.299714119 +0000 UTC m=+2309.636003805" Oct 11 04:33:45 crc kubenswrapper[4798]: I1011 04:33:45.350333 4798 generic.go:334] "Generic (PLEG): container finished" podID="c7e25630-2ac6-435f-bba0-281aaee8a48d" containerID="d6f021b3859628b81a52f419cbd1d1f94724c1f7bd7faa6c2bfeb06d2047ebb4" exitCode=0 Oct 11 04:33:45 crc kubenswrapper[4798]: I1011 04:33:45.350424 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" event={"ID":"c7e25630-2ac6-435f-bba0-281aaee8a48d","Type":"ContainerDied","Data":"d6f021b3859628b81a52f419cbd1d1f94724c1f7bd7faa6c2bfeb06d2047ebb4"} Oct 11 04:33:45 crc kubenswrapper[4798]: I1011 04:33:45.430418 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:33:45 crc kubenswrapper[4798]: E1011 04:33:45.430645 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:33:46 crc kubenswrapper[4798]: I1011 04:33:46.847690 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:46 crc kubenswrapper[4798]: I1011 04:33:46.920437 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-ceph\") pod \"c7e25630-2ac6-435f-bba0-281aaee8a48d\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " Oct 11 04:33:46 crc kubenswrapper[4798]: I1011 04:33:46.920606 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-inventory\") pod \"c7e25630-2ac6-435f-bba0-281aaee8a48d\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " Oct 11 04:33:46 crc kubenswrapper[4798]: I1011 04:33:46.920763 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nc7xd\" (UniqueName: \"kubernetes.io/projected/c7e25630-2ac6-435f-bba0-281aaee8a48d-kube-api-access-nc7xd\") pod \"c7e25630-2ac6-435f-bba0-281aaee8a48d\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " Oct 11 04:33:46 crc kubenswrapper[4798]: I1011 04:33:46.920815 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-ssh-key\") pod \"c7e25630-2ac6-435f-bba0-281aaee8a48d\" (UID: \"c7e25630-2ac6-435f-bba0-281aaee8a48d\") " Oct 11 04:33:46 crc kubenswrapper[4798]: I1011 04:33:46.928457 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7e25630-2ac6-435f-bba0-281aaee8a48d-kube-api-access-nc7xd" (OuterVolumeSpecName: "kube-api-access-nc7xd") pod "c7e25630-2ac6-435f-bba0-281aaee8a48d" (UID: "c7e25630-2ac6-435f-bba0-281aaee8a48d"). InnerVolumeSpecName "kube-api-access-nc7xd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:33:46 crc kubenswrapper[4798]: I1011 04:33:46.939904 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-ceph" (OuterVolumeSpecName: "ceph") pod "c7e25630-2ac6-435f-bba0-281aaee8a48d" (UID: "c7e25630-2ac6-435f-bba0-281aaee8a48d"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:33:46 crc kubenswrapper[4798]: I1011 04:33:46.954555 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-inventory" (OuterVolumeSpecName: "inventory") pod "c7e25630-2ac6-435f-bba0-281aaee8a48d" (UID: "c7e25630-2ac6-435f-bba0-281aaee8a48d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:33:46 crc kubenswrapper[4798]: I1011 04:33:46.961270 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c7e25630-2ac6-435f-bba0-281aaee8a48d" (UID: "c7e25630-2ac6-435f-bba0-281aaee8a48d"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.023538 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nc7xd\" (UniqueName: \"kubernetes.io/projected/c7e25630-2ac6-435f-bba0-281aaee8a48d-kube-api-access-nc7xd\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.023600 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.023612 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.023620 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7e25630-2ac6-435f-bba0-281aaee8a48d-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.377667 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" event={"ID":"c7e25630-2ac6-435f-bba0-281aaee8a48d","Type":"ContainerDied","Data":"254eb8a3dc0631bda7bfa39c2aaa7424b1d07c1282619895b1bbce932f9134cf"} Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.378167 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="254eb8a3dc0631bda7bfa39c2aaa7424b1d07c1282619895b1bbce932f9134cf" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.377923 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.507165 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f"] Oct 11 04:33:47 crc kubenswrapper[4798]: E1011 04:33:47.507886 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7e25630-2ac6-435f-bba0-281aaee8a48d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.507918 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7e25630-2ac6-435f-bba0-281aaee8a48d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.508184 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7e25630-2ac6-435f-bba0-281aaee8a48d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.509227 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.514438 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.514676 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.514838 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.514996 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.515273 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.515432 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.515563 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.515707 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.526629 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f"] Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.640997 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.641062 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.641088 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.641120 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-bootstrap-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.641184 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b2wg\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-kube-api-access-8b2wg\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.641209 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.641242 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.641263 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.641287 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.641329 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.641366 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 
11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.641424 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.641447 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.743167 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b2wg\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-kube-api-access-8b2wg\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.743247 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.743287 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.743316 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.743345 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.743393 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.743446 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.743484 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.743513 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.743559 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.743580 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.743609 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.743631 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc 
kubenswrapper[4798]: I1011 04:33:47.750588 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.750646 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.751671 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.754911 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.755165 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.757509 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.758928 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.760149 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.761599 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.763058 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.763203 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.763847 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.774234 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b2wg\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-kube-api-access-8b2wg\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:47 crc kubenswrapper[4798]: I1011 04:33:47.843351 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:33:48 crc kubenswrapper[4798]: I1011 04:33:48.446834 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f"] Oct 11 04:33:49 crc kubenswrapper[4798]: I1011 04:33:49.399538 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" event={"ID":"5f08cfce-c132-471d-b2c8-e725c0c5b7ec","Type":"ContainerStarted","Data":"6893e34eafe86f5cf909871d896106d5e9a1a26dc8525f1991d0f26d2779b607"} Oct 11 04:33:49 crc kubenswrapper[4798]: I1011 04:33:49.399886 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" event={"ID":"5f08cfce-c132-471d-b2c8-e725c0c5b7ec","Type":"ContainerStarted","Data":"ec284a37aee8db5fad5555d93b77cc3c508544b6fd8fc6535d559f8cd1c6e008"} Oct 11 04:33:49 crc kubenswrapper[4798]: I1011 04:33:49.431356 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" podStartSLOduration=2.017149227 podStartE2EDuration="2.431329607s" podCreationTimestamp="2025-10-11 04:33:47 +0000 UTC" firstStartedPulling="2025-10-11 04:33:48.458683013 +0000 UTC m=+2323.794972699" lastFinishedPulling="2025-10-11 04:33:48.872863363 +0000 UTC m=+2324.209153079" observedRunningTime="2025-10-11 04:33:49.42590608 +0000 UTC m=+2324.762195766" watchObservedRunningTime="2025-10-11 04:33:49.431329607 +0000 UTC m=+2324.767619293" Oct 11 04:33:56 crc kubenswrapper[4798]: I1011 04:33:56.424297 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:33:56 crc kubenswrapper[4798]: E1011 04:33:56.426916 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:34:08 crc kubenswrapper[4798]: I1011 04:34:08.424577 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:34:08 crc kubenswrapper[4798]: E1011 04:34:08.425950 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:34:22 crc kubenswrapper[4798]: I1011 04:34:22.424321 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:34:22 crc kubenswrapper[4798]: E1011 04:34:22.425129 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:34:26 crc kubenswrapper[4798]: I1011 04:34:26.855367 4798 generic.go:334] "Generic (PLEG): container finished" podID="5f08cfce-c132-471d-b2c8-e725c0c5b7ec" containerID="6893e34eafe86f5cf909871d896106d5e9a1a26dc8525f1991d0f26d2779b607" exitCode=0 Oct 11 04:34:26 crc kubenswrapper[4798]: I1011 04:34:26.855560 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" event={"ID":"5f08cfce-c132-471d-b2c8-e725c0c5b7ec","Type":"ContainerDied","Data":"6893e34eafe86f5cf909871d896106d5e9a1a26dc8525f1991d0f26d2779b607"} Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.395428 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.554654 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-libvirt-combined-ca-bundle\") pod \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.554746 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.554834 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ovn-combined-ca-bundle\") pod \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.554912 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-nova-combined-ca-bundle\") pod \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.555009 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.555055 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-ovn-default-certs-0\") pod \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.555094 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ssh-key\") pod 
\"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.555176 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-neutron-metadata-combined-ca-bundle\") pod \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.555309 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8b2wg\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-kube-api-access-8b2wg\") pod \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.555377 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-bootstrap-combined-ca-bundle\") pod \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.555469 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-inventory\") pod \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.555499 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-repo-setup-combined-ca-bundle\") pod \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.555581 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ceph\") pod \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\" (UID: \"5f08cfce-c132-471d-b2c8-e725c0c5b7ec\") " Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.563423 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-kube-api-access-8b2wg" (OuterVolumeSpecName: "kube-api-access-8b2wg") pod "5f08cfce-c132-471d-b2c8-e725c0c5b7ec" (UID: "5f08cfce-c132-471d-b2c8-e725c0c5b7ec"). InnerVolumeSpecName "kube-api-access-8b2wg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.564233 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "5f08cfce-c132-471d-b2c8-e725c0c5b7ec" (UID: "5f08cfce-c132-471d-b2c8-e725c0c5b7ec"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.565545 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "5f08cfce-c132-471d-b2c8-e725c0c5b7ec" (UID: "5f08cfce-c132-471d-b2c8-e725c0c5b7ec"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.565845 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "5f08cfce-c132-471d-b2c8-e725c0c5b7ec" (UID: "5f08cfce-c132-471d-b2c8-e725c0c5b7ec"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.565914 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "5f08cfce-c132-471d-b2c8-e725c0c5b7ec" (UID: "5f08cfce-c132-471d-b2c8-e725c0c5b7ec"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.567180 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "5f08cfce-c132-471d-b2c8-e725c0c5b7ec" (UID: "5f08cfce-c132-471d-b2c8-e725c0c5b7ec"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.568150 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "5f08cfce-c132-471d-b2c8-e725c0c5b7ec" (UID: "5f08cfce-c132-471d-b2c8-e725c0c5b7ec"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.568333 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "5f08cfce-c132-471d-b2c8-e725c0c5b7ec" (UID: "5f08cfce-c132-471d-b2c8-e725c0c5b7ec"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.568382 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "5f08cfce-c132-471d-b2c8-e725c0c5b7ec" (UID: "5f08cfce-c132-471d-b2c8-e725c0c5b7ec"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.568606 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ceph" (OuterVolumeSpecName: "ceph") pod "5f08cfce-c132-471d-b2c8-e725c0c5b7ec" (UID: "5f08cfce-c132-471d-b2c8-e725c0c5b7ec"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.575554 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "5f08cfce-c132-471d-b2c8-e725c0c5b7ec" (UID: "5f08cfce-c132-471d-b2c8-e725c0c5b7ec"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.599444 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-inventory" (OuterVolumeSpecName: "inventory") pod "5f08cfce-c132-471d-b2c8-e725c0c5b7ec" (UID: "5f08cfce-c132-471d-b2c8-e725c0c5b7ec"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.627242 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5f08cfce-c132-471d-b2c8-e725c0c5b7ec" (UID: "5f08cfce-c132-471d-b2c8-e725c0c5b7ec"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.659883 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.659928 4798 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.659945 4798 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.659964 4798 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.659979 4798 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.659993 4798 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" 
DevicePath \"\"" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.660010 4798 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.660024 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.660038 4798 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.660054 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8b2wg\" (UniqueName: \"kubernetes.io/projected/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-kube-api-access-8b2wg\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.660072 4798 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.660085 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.660101 4798 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f08cfce-c132-471d-b2c8-e725c0c5b7ec-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.895269 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" event={"ID":"5f08cfce-c132-471d-b2c8-e725c0c5b7ec","Type":"ContainerDied","Data":"ec284a37aee8db5fad5555d93b77cc3c508544b6fd8fc6535d559f8cd1c6e008"} Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.895336 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec284a37aee8db5fad5555d93b77cc3c508544b6fd8fc6535d559f8cd1c6e008" Oct 11 04:34:28 crc kubenswrapper[4798]: I1011 04:34:28.895376 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.009123 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv"] Oct 11 04:34:29 crc kubenswrapper[4798]: E1011 04:34:29.009695 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f08cfce-c132-471d-b2c8-e725c0c5b7ec" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.009712 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f08cfce-c132-471d-b2c8-e725c0c5b7ec" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.009957 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f08cfce-c132-471d-b2c8-e725c0c5b7ec" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.010758 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.013915 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.013976 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.013932 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.014620 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.016990 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.023926 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv"] Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.174588 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lfq4\" (UniqueName: \"kubernetes.io/projected/31303b8b-3524-40dd-9534-baccdc1d5a70-kube-api-access-2lfq4\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.175017 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.175381 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " 
pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.175665 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.277146 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.277212 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lfq4\" (UniqueName: \"kubernetes.io/projected/31303b8b-3524-40dd-9534-baccdc1d5a70-kube-api-access-2lfq4\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.277347 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.278213 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.282933 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.282966 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.285053 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.301015 4798 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lfq4\" (UniqueName: \"kubernetes.io/projected/31303b8b-3524-40dd-9534-baccdc1d5a70-kube-api-access-2lfq4\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.338980 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.918604 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv"] Oct 11 04:34:29 crc kubenswrapper[4798]: W1011 04:34:29.931815 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod31303b8b_3524_40dd_9534_baccdc1d5a70.slice/crio-e0704a2e0959d21928fa39c2ce9c0d5cafe07c4eee3c6b45bc520d591797b969 WatchSource:0}: Error finding container e0704a2e0959d21928fa39c2ce9c0d5cafe07c4eee3c6b45bc520d591797b969: Status 404 returned error can't find the container with id e0704a2e0959d21928fa39c2ce9c0d5cafe07c4eee3c6b45bc520d591797b969 Oct 11 04:34:29 crc kubenswrapper[4798]: I1011 04:34:29.944739 4798 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 04:34:30 crc kubenswrapper[4798]: I1011 04:34:30.920285 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" event={"ID":"31303b8b-3524-40dd-9534-baccdc1d5a70","Type":"ContainerStarted","Data":"c1d386772b0384143f3c36a4e3cc8ca5b072f0fe3fe8abe74447b4436643f615"} Oct 11 04:34:30 crc kubenswrapper[4798]: I1011 04:34:30.921020 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" event={"ID":"31303b8b-3524-40dd-9534-baccdc1d5a70","Type":"ContainerStarted","Data":"e0704a2e0959d21928fa39c2ce9c0d5cafe07c4eee3c6b45bc520d591797b969"} Oct 11 04:34:30 crc kubenswrapper[4798]: I1011 04:34:30.943568 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" podStartSLOduration=2.406795256 podStartE2EDuration="2.943546219s" podCreationTimestamp="2025-10-11 04:34:28 +0000 UTC" firstStartedPulling="2025-10-11 04:34:29.944342718 +0000 UTC m=+2365.280632414" lastFinishedPulling="2025-10-11 04:34:30.481093691 +0000 UTC m=+2365.817383377" observedRunningTime="2025-10-11 04:34:30.939560422 +0000 UTC m=+2366.275850118" watchObservedRunningTime="2025-10-11 04:34:30.943546219 +0000 UTC m=+2366.279835905" Oct 11 04:34:34 crc kubenswrapper[4798]: I1011 04:34:34.423975 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:34:34 crc kubenswrapper[4798]: E1011 04:34:34.425208 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:34:36 crc kubenswrapper[4798]: I1011 04:34:36.980650 4798 
generic.go:334] "Generic (PLEG): container finished" podID="31303b8b-3524-40dd-9534-baccdc1d5a70" containerID="c1d386772b0384143f3c36a4e3cc8ca5b072f0fe3fe8abe74447b4436643f615" exitCode=0 Oct 11 04:34:36 crc kubenswrapper[4798]: I1011 04:34:36.980840 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" event={"ID":"31303b8b-3524-40dd-9534-baccdc1d5a70","Type":"ContainerDied","Data":"c1d386772b0384143f3c36a4e3cc8ca5b072f0fe3fe8abe74447b4436643f615"} Oct 11 04:34:38 crc kubenswrapper[4798]: I1011 04:34:38.486184 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:38 crc kubenswrapper[4798]: I1011 04:34:38.614428 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-ssh-key\") pod \"31303b8b-3524-40dd-9534-baccdc1d5a70\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " Oct 11 04:34:38 crc kubenswrapper[4798]: I1011 04:34:38.614580 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2lfq4\" (UniqueName: \"kubernetes.io/projected/31303b8b-3524-40dd-9534-baccdc1d5a70-kube-api-access-2lfq4\") pod \"31303b8b-3524-40dd-9534-baccdc1d5a70\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " Oct 11 04:34:38 crc kubenswrapper[4798]: I1011 04:34:38.614640 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-inventory\") pod \"31303b8b-3524-40dd-9534-baccdc1d5a70\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " Oct 11 04:34:38 crc kubenswrapper[4798]: I1011 04:34:38.614679 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-ceph\") pod \"31303b8b-3524-40dd-9534-baccdc1d5a70\" (UID: \"31303b8b-3524-40dd-9534-baccdc1d5a70\") " Oct 11 04:34:38 crc kubenswrapper[4798]: I1011 04:34:38.625847 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-ceph" (OuterVolumeSpecName: "ceph") pod "31303b8b-3524-40dd-9534-baccdc1d5a70" (UID: "31303b8b-3524-40dd-9534-baccdc1d5a70"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:34:38 crc kubenswrapper[4798]: I1011 04:34:38.640419 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31303b8b-3524-40dd-9534-baccdc1d5a70-kube-api-access-2lfq4" (OuterVolumeSpecName: "kube-api-access-2lfq4") pod "31303b8b-3524-40dd-9534-baccdc1d5a70" (UID: "31303b8b-3524-40dd-9534-baccdc1d5a70"). InnerVolumeSpecName "kube-api-access-2lfq4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:34:38 crc kubenswrapper[4798]: I1011 04:34:38.707710 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-inventory" (OuterVolumeSpecName: "inventory") pod "31303b8b-3524-40dd-9534-baccdc1d5a70" (UID: "31303b8b-3524-40dd-9534-baccdc1d5a70"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:34:38 crc kubenswrapper[4798]: I1011 04:34:38.721676 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2lfq4\" (UniqueName: \"kubernetes.io/projected/31303b8b-3524-40dd-9534-baccdc1d5a70-kube-api-access-2lfq4\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:38 crc kubenswrapper[4798]: I1011 04:34:38.721712 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:38 crc kubenswrapper[4798]: I1011 04:34:38.721723 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:38 crc kubenswrapper[4798]: I1011 04:34:38.737214 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "31303b8b-3524-40dd-9534-baccdc1d5a70" (UID: "31303b8b-3524-40dd-9534-baccdc1d5a70"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:34:38 crc kubenswrapper[4798]: I1011 04:34:38.824363 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/31303b8b-3524-40dd-9534-baccdc1d5a70-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.006260 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" event={"ID":"31303b8b-3524-40dd-9534-baccdc1d5a70","Type":"ContainerDied","Data":"e0704a2e0959d21928fa39c2ce9c0d5cafe07c4eee3c6b45bc520d591797b969"} Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.006306 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0704a2e0959d21928fa39c2ce9c0d5cafe07c4eee3c6b45bc520d591797b969" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.006364 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.086425 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7"] Oct 11 04:34:39 crc kubenswrapper[4798]: E1011 04:34:39.086775 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31303b8b-3524-40dd-9534-baccdc1d5a70" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.086794 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="31303b8b-3524-40dd-9534-baccdc1d5a70" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.086978 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="31303b8b-3524-40dd-9534-baccdc1d5a70" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.087583 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.090864 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.090865 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.091544 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.091620 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.092492 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.093388 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.119544 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7"] Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.235102 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.235194 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.235241 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6844j\" (UniqueName: \"kubernetes.io/projected/08139862-0611-43b8-93e1-c5d9cd73e4d5-kube-api-access-6844j\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.235357 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/08139862-0611-43b8-93e1-c5d9cd73e4d5-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.235420 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc 
kubenswrapper[4798]: I1011 04:34:39.235499 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.338348 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.338610 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.339450 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.339517 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6844j\" (UniqueName: \"kubernetes.io/projected/08139862-0611-43b8-93e1-c5d9cd73e4d5-kube-api-access-6844j\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.339740 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/08139862-0611-43b8-93e1-c5d9cd73e4d5-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.339820 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.342022 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/08139862-0611-43b8-93e1-c5d9cd73e4d5-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.343594 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.343792 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.344804 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.345626 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.374089 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6844j\" (UniqueName: \"kubernetes.io/projected/08139862-0611-43b8-93e1-c5d9cd73e4d5-kube-api-access-6844j\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vmqz7\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:39 crc kubenswrapper[4798]: I1011 04:34:39.423344 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:34:40 crc kubenswrapper[4798]: I1011 04:34:40.143559 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7"] Oct 11 04:34:40 crc kubenswrapper[4798]: W1011 04:34:40.149520 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod08139862_0611_43b8_93e1_c5d9cd73e4d5.slice/crio-c2dfd3a1dcb73238d808b99fa9536f67a6f1079cccdf020a5474cb6859c4583b WatchSource:0}: Error finding container c2dfd3a1dcb73238d808b99fa9536f67a6f1079cccdf020a5474cb6859c4583b: Status 404 returned error can't find the container with id c2dfd3a1dcb73238d808b99fa9536f67a6f1079cccdf020a5474cb6859c4583b Oct 11 04:34:41 crc kubenswrapper[4798]: I1011 04:34:41.029036 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" event={"ID":"08139862-0611-43b8-93e1-c5d9cd73e4d5","Type":"ContainerStarted","Data":"6ff8dd83c6acf72448f0415114466a13217a8f8908a79fb1fedb5688c9787457"} Oct 11 04:34:41 crc kubenswrapper[4798]: I1011 04:34:41.029496 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" event={"ID":"08139862-0611-43b8-93e1-c5d9cd73e4d5","Type":"ContainerStarted","Data":"c2dfd3a1dcb73238d808b99fa9536f67a6f1079cccdf020a5474cb6859c4583b"} Oct 11 04:34:41 crc kubenswrapper[4798]: I1011 04:34:41.051071 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" podStartSLOduration=1.563614126 podStartE2EDuration="2.051050247s" podCreationTimestamp="2025-10-11 04:34:39 +0000 UTC" firstStartedPulling="2025-10-11 04:34:40.152861591 +0000 UTC m=+2375.489151277" lastFinishedPulling="2025-10-11 04:34:40.640297712 +0000 UTC m=+2375.976587398" observedRunningTime="2025-10-11 04:34:41.048170865 +0000 UTC m=+2376.384460551" watchObservedRunningTime="2025-10-11 04:34:41.051050247 +0000 UTC m=+2376.387339953" Oct 11 04:34:49 crc kubenswrapper[4798]: I1011 04:34:49.423974 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:34:49 crc kubenswrapper[4798]: E1011 04:34:49.424876 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:35:04 crc kubenswrapper[4798]: I1011 04:35:04.426146 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:35:04 crc kubenswrapper[4798]: E1011 04:35:04.427833 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:35:17 crc kubenswrapper[4798]: I1011 04:35:17.425446 4798 scope.go:117] 
"RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:35:17 crc kubenswrapper[4798]: E1011 04:35:17.429578 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:35:28 crc kubenswrapper[4798]: I1011 04:35:28.425234 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:35:28 crc kubenswrapper[4798]: E1011 04:35:28.426922 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:35:41 crc kubenswrapper[4798]: I1011 04:35:41.424711 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:35:41 crc kubenswrapper[4798]: E1011 04:35:41.426044 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:35:52 crc kubenswrapper[4798]: I1011 04:35:52.423655 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:35:52 crc kubenswrapper[4798]: E1011 04:35:52.426337 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:36:03 crc kubenswrapper[4798]: I1011 04:36:03.020202 4798 generic.go:334] "Generic (PLEG): container finished" podID="08139862-0611-43b8-93e1-c5d9cd73e4d5" containerID="6ff8dd83c6acf72448f0415114466a13217a8f8908a79fb1fedb5688c9787457" exitCode=0 Oct 11 04:36:03 crc kubenswrapper[4798]: I1011 04:36:03.021220 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" event={"ID":"08139862-0611-43b8-93e1-c5d9cd73e4d5","Type":"ContainerDied","Data":"6ff8dd83c6acf72448f0415114466a13217a8f8908a79fb1fedb5688c9787457"} Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.424919 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:36:04 crc kubenswrapper[4798]: E1011 04:36:04.425958 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.578230 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.745042 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ssh-key\") pod \"08139862-0611-43b8-93e1-c5d9cd73e4d5\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.745217 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6844j\" (UniqueName: \"kubernetes.io/projected/08139862-0611-43b8-93e1-c5d9cd73e4d5-kube-api-access-6844j\") pod \"08139862-0611-43b8-93e1-c5d9cd73e4d5\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.746541 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-inventory\") pod \"08139862-0611-43b8-93e1-c5d9cd73e4d5\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.746715 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ceph\") pod \"08139862-0611-43b8-93e1-c5d9cd73e4d5\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.746772 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ovn-combined-ca-bundle\") pod \"08139862-0611-43b8-93e1-c5d9cd73e4d5\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.746848 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/08139862-0611-43b8-93e1-c5d9cd73e4d5-ovncontroller-config-0\") pod \"08139862-0611-43b8-93e1-c5d9cd73e4d5\" (UID: \"08139862-0611-43b8-93e1-c5d9cd73e4d5\") " Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.756754 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08139862-0611-43b8-93e1-c5d9cd73e4d5-kube-api-access-6844j" (OuterVolumeSpecName: "kube-api-access-6844j") pod "08139862-0611-43b8-93e1-c5d9cd73e4d5" (UID: "08139862-0611-43b8-93e1-c5d9cd73e4d5"). InnerVolumeSpecName "kube-api-access-6844j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.764739 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ceph" (OuterVolumeSpecName: "ceph") pod "08139862-0611-43b8-93e1-c5d9cd73e4d5" (UID: "08139862-0611-43b8-93e1-c5d9cd73e4d5"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.765763 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "08139862-0611-43b8-93e1-c5d9cd73e4d5" (UID: "08139862-0611-43b8-93e1-c5d9cd73e4d5"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.782879 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08139862-0611-43b8-93e1-c5d9cd73e4d5-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "08139862-0611-43b8-93e1-c5d9cd73e4d5" (UID: "08139862-0611-43b8-93e1-c5d9cd73e4d5"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.797464 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-inventory" (OuterVolumeSpecName: "inventory") pod "08139862-0611-43b8-93e1-c5d9cd73e4d5" (UID: "08139862-0611-43b8-93e1-c5d9cd73e4d5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.808573 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "08139862-0611-43b8-93e1-c5d9cd73e4d5" (UID: "08139862-0611-43b8-93e1-c5d9cd73e4d5"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.850509 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6844j\" (UniqueName: \"kubernetes.io/projected/08139862-0611-43b8-93e1-c5d9cd73e4d5-kube-api-access-6844j\") on node \"crc\" DevicePath \"\"" Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.850609 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.850630 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.850650 4798 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.850671 4798 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/08139862-0611-43b8-93e1-c5d9cd73e4d5-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Oct 11 04:36:04 crc kubenswrapper[4798]: I1011 04:36:04.850689 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/08139862-0611-43b8-93e1-c5d9cd73e4d5-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.053842 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" event={"ID":"08139862-0611-43b8-93e1-c5d9cd73e4d5","Type":"ContainerDied","Data":"c2dfd3a1dcb73238d808b99fa9536f67a6f1079cccdf020a5474cb6859c4583b"} Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.053913 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2dfd3a1dcb73238d808b99fa9536f67a6f1079cccdf020a5474cb6859c4583b" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.053970 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vmqz7" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.183715 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk"] Oct 11 04:36:05 crc kubenswrapper[4798]: E1011 04:36:05.184239 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08139862-0611-43b8-93e1-c5d9cd73e4d5" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.184265 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="08139862-0611-43b8-93e1-c5d9cd73e4d5" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.186628 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="08139862-0611-43b8-93e1-c5d9cd73e4d5" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.187607 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.190312 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.190406 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.191269 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.191275 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.191288 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.191361 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.191354 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.207366 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk"] Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.367087 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: 
\"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.367927 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.367961 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.368124 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.368235 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.368522 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.369496 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmkdx\" (UniqueName: \"kubernetes.io/projected/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-kube-api-access-bmkdx\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.471563 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.471655 4798 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.471688 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.471718 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.471740 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.473481 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.473648 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmkdx\" (UniqueName: \"kubernetes.io/projected/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-kube-api-access-bmkdx\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.476833 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.477942 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.486047 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.487100 4798 
reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.487363 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.487578 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.487660 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.496541 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.496723 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.498077 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.501146 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.504384 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmkdx\" (UniqueName: \"kubernetes.io/projected/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-kube-api-access-bmkdx\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.520006 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.528306 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:36:05 crc kubenswrapper[4798]: I1011 04:36:05.944151 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk"] Oct 11 04:36:06 crc kubenswrapper[4798]: I1011 04:36:06.070747 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" event={"ID":"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2","Type":"ContainerStarted","Data":"3cee002717006b8c4bdc9cda01bcd958ea4fad3b853cc3e6d3a14aaf80c77bfc"} Oct 11 04:36:06 crc kubenswrapper[4798]: I1011 04:36:06.631747 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:36:07 crc kubenswrapper[4798]: I1011 04:36:07.083583 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" event={"ID":"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2","Type":"ContainerStarted","Data":"1474e68523cd92734c5b2bed3dc5d951e7be5faace6ca73a0cdcd800500b72e8"} Oct 11 04:36:07 crc kubenswrapper[4798]: I1011 04:36:07.108083 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" podStartSLOduration=1.4608471760000001 podStartE2EDuration="2.108054098s" podCreationTimestamp="2025-10-11 04:36:05 +0000 UTC" firstStartedPulling="2025-10-11 04:36:05.978308652 +0000 UTC m=+2461.314598338" lastFinishedPulling="2025-10-11 04:36:06.625515544 +0000 UTC m=+2461.961805260" observedRunningTime="2025-10-11 04:36:07.102720912 +0000 UTC m=+2462.439010618" watchObservedRunningTime="2025-10-11 04:36:07.108054098 +0000 UTC m=+2462.444343794" Oct 11 04:36:16 crc kubenswrapper[4798]: I1011 04:36:16.424382 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:36:16 crc kubenswrapper[4798]: E1011 04:36:16.425568 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:36:27 crc kubenswrapper[4798]: I1011 04:36:27.424758 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:36:28 crc kubenswrapper[4798]: I1011 04:36:28.343919 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"cfcd870e30121e7648d377b94f1af550547a76a44c2be1c2cfee763d9b8be3df"} Oct 11 04:37:09 crc kubenswrapper[4798]: I1011 04:37:09.844348 4798 generic.go:334] "Generic (PLEG): container finished" podID="ad721bd6-8a4d-47c7-b544-4d6c480d6fd2" containerID="1474e68523cd92734c5b2bed3dc5d951e7be5faace6ca73a0cdcd800500b72e8" exitCode=0 Oct 11 04:37:09 crc kubenswrapper[4798]: I1011 04:37:09.844460 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" 
event={"ID":"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2","Type":"ContainerDied","Data":"1474e68523cd92734c5b2bed3dc5d951e7be5faace6ca73a0cdcd800500b72e8"} Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.372131 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.530946 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-nova-metadata-neutron-config-0\") pod \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.531006 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-inventory\") pod \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.531067 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-ssh-key\") pod \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.531091 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-ceph\") pod \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.531163 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-neutron-ovn-metadata-agent-neutron-config-0\") pod \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.531219 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmkdx\" (UniqueName: \"kubernetes.io/projected/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-kube-api-access-bmkdx\") pod \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.531296 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-neutron-metadata-combined-ca-bundle\") pod \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\" (UID: \"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2\") " Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.545828 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-ceph" (OuterVolumeSpecName: "ceph") pod "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2" (UID: "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.547219 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-kube-api-access-bmkdx" (OuterVolumeSpecName: "kube-api-access-bmkdx") pod "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2" (UID: "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2"). InnerVolumeSpecName "kube-api-access-bmkdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.551004 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2" (UID: "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.567840 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2" (UID: "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.569216 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2" (UID: "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.569992 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2" (UID: "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.577679 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-inventory" (OuterVolumeSpecName: "inventory") pod "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2" (UID: "ad721bd6-8a4d-47c7-b544-4d6c480d6fd2"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.634195 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.634379 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.634505 4798 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.634615 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmkdx\" (UniqueName: \"kubernetes.io/projected/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-kube-api-access-bmkdx\") on node \"crc\" DevicePath \"\"" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.634703 4798 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.634795 4798 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.634881 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad721bd6-8a4d-47c7-b544-4d6c480d6fd2-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.886098 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" event={"ID":"ad721bd6-8a4d-47c7-b544-4d6c480d6fd2","Type":"ContainerDied","Data":"3cee002717006b8c4bdc9cda01bcd958ea4fad3b853cc3e6d3a14aaf80c77bfc"} Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.886713 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3cee002717006b8c4bdc9cda01bcd958ea4fad3b853cc3e6d3a14aaf80c77bfc" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.886453 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.978989 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj"] Oct 11 04:37:11 crc kubenswrapper[4798]: E1011 04:37:11.979446 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad721bd6-8a4d-47c7-b544-4d6c480d6fd2" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.979467 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad721bd6-8a4d-47c7-b544-4d6c480d6fd2" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.979665 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad721bd6-8a4d-47c7-b544-4d6c480d6fd2" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.981711 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.984443 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.984602 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.985265 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.985363 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.985563 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:37:11 crc kubenswrapper[4798]: I1011 04:37:11.986078 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.041950 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj"] Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.145762 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.145841 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.145948 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.146109 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ngl8\" (UniqueName: \"kubernetes.io/projected/8550f028-bbd9-455c-9c93-0e813f2a95ed-kube-api-access-6ngl8\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.146257 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.146342 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.248583 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.248646 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ngl8\" (UniqueName: \"kubernetes.io/projected/8550f028-bbd9-455c-9c93-0e813f2a95ed-kube-api-access-6ngl8\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.248689 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.248720 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.248813 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.248852 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.256010 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.256227 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.257891 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.261778 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.262794 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.280071 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ngl8\" (UniqueName: \"kubernetes.io/projected/8550f028-bbd9-455c-9c93-0e813f2a95ed-kube-api-access-6ngl8\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-649lj\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:12 crc kubenswrapper[4798]: I1011 04:37:12.337038 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:37:13 crc kubenswrapper[4798]: I1011 04:37:13.009385 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj"] Oct 11 04:37:13 crc kubenswrapper[4798]: I1011 04:37:13.912135 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" event={"ID":"8550f028-bbd9-455c-9c93-0e813f2a95ed","Type":"ContainerStarted","Data":"96c7ca14409540dac953477baee2bab81c7f3f91fe56dc813e366ef2a1148c48"} Oct 11 04:37:13 crc kubenswrapper[4798]: I1011 04:37:13.913071 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" event={"ID":"8550f028-bbd9-455c-9c93-0e813f2a95ed","Type":"ContainerStarted","Data":"def858fe9c3019c87ce9083256d45903bebf258278cd7bbbc07362804d8070d4"} Oct 11 04:37:13 crc kubenswrapper[4798]: I1011 04:37:13.937641 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" podStartSLOduration=2.392313775 podStartE2EDuration="2.937611773s" podCreationTimestamp="2025-10-11 04:37:11 +0000 UTC" firstStartedPulling="2025-10-11 04:37:13.029883812 +0000 UTC m=+2528.366173498" lastFinishedPulling="2025-10-11 04:37:13.57518181 +0000 UTC m=+2528.911471496" observedRunningTime="2025-10-11 04:37:13.929367399 +0000 UTC m=+2529.265657125" watchObservedRunningTime="2025-10-11 04:37:13.937611773 +0000 UTC m=+2529.273901459" Oct 11 04:38:27 crc kubenswrapper[4798]: I1011 04:38:27.138998 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:38:27 crc kubenswrapper[4798]: I1011 04:38:27.139825 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:38:57 crc kubenswrapper[4798]: I1011 04:38:57.138781 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:38:57 crc kubenswrapper[4798]: I1011 04:38:57.139692 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:39:27 crc kubenswrapper[4798]: I1011 04:39:27.139007 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:39:27 crc kubenswrapper[4798]: I1011 04:39:27.140007 4798 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:39:27 crc kubenswrapper[4798]: I1011 04:39:27.140122 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 04:39:27 crc kubenswrapper[4798]: I1011 04:39:27.141370 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cfcd870e30121e7648d377b94f1af550547a76a44c2be1c2cfee763d9b8be3df"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 04:39:27 crc kubenswrapper[4798]: I1011 04:39:27.141561 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://cfcd870e30121e7648d377b94f1af550547a76a44c2be1c2cfee763d9b8be3df" gracePeriod=600 Oct 11 04:39:27 crc kubenswrapper[4798]: I1011 04:39:27.604598 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerID="cfcd870e30121e7648d377b94f1af550547a76a44c2be1c2cfee763d9b8be3df" exitCode=0 Oct 11 04:39:27 crc kubenswrapper[4798]: I1011 04:39:27.604830 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"cfcd870e30121e7648d377b94f1af550547a76a44c2be1c2cfee763d9b8be3df"} Oct 11 04:39:27 crc kubenswrapper[4798]: I1011 04:39:27.605176 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f"} Oct 11 04:39:27 crc kubenswrapper[4798]: I1011 04:39:27.605226 4798 scope.go:117] "RemoveContainer" containerID="e764c82b5ef4905e7c6c502957bfa38bb423ee4a4677ee706a5d2b87d33d2c1c" Oct 11 04:40:40 crc kubenswrapper[4798]: I1011 04:40:40.377042 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-228dw"] Oct 11 04:40:40 crc kubenswrapper[4798]: I1011 04:40:40.390158 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:40 crc kubenswrapper[4798]: I1011 04:40:40.422685 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-228dw"] Oct 11 04:40:40 crc kubenswrapper[4798]: I1011 04:40:40.499230 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmbkr\" (UniqueName: \"kubernetes.io/projected/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-kube-api-access-dmbkr\") pod \"certified-operators-228dw\" (UID: \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\") " pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:40 crc kubenswrapper[4798]: I1011 04:40:40.499733 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-utilities\") pod \"certified-operators-228dw\" (UID: \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\") " pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:40 crc kubenswrapper[4798]: I1011 04:40:40.500052 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-catalog-content\") pod \"certified-operators-228dw\" (UID: \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\") " pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:40 crc kubenswrapper[4798]: I1011 04:40:40.602209 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-catalog-content\") pod \"certified-operators-228dw\" (UID: \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\") " pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:40 crc kubenswrapper[4798]: I1011 04:40:40.602366 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmbkr\" (UniqueName: \"kubernetes.io/projected/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-kube-api-access-dmbkr\") pod \"certified-operators-228dw\" (UID: \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\") " pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:40 crc kubenswrapper[4798]: I1011 04:40:40.602492 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-utilities\") pod \"certified-operators-228dw\" (UID: \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\") " pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:40 crc kubenswrapper[4798]: I1011 04:40:40.603875 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-catalog-content\") pod \"certified-operators-228dw\" (UID: \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\") " pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:40 crc kubenswrapper[4798]: I1011 04:40:40.603942 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-utilities\") pod \"certified-operators-228dw\" (UID: \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\") " pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:40 crc kubenswrapper[4798]: I1011 04:40:40.639223 4798 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-dmbkr\" (UniqueName: \"kubernetes.io/projected/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-kube-api-access-dmbkr\") pod \"certified-operators-228dw\" (UID: \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\") " pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:40 crc kubenswrapper[4798]: I1011 04:40:40.744220 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:41 crc kubenswrapper[4798]: I1011 04:40:41.335427 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-228dw"] Oct 11 04:40:41 crc kubenswrapper[4798]: I1011 04:40:41.605949 4798 generic.go:334] "Generic (PLEG): container finished" podID="e72d835c-5b5a-4c3c-9416-16f3c715c0f4" containerID="006cafe3c1d6e5c88e7fa96b4d665dcac3df5846c4519dae22664f5ef7dc9b82" exitCode=0 Oct 11 04:40:41 crc kubenswrapper[4798]: I1011 04:40:41.606302 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-228dw" event={"ID":"e72d835c-5b5a-4c3c-9416-16f3c715c0f4","Type":"ContainerDied","Data":"006cafe3c1d6e5c88e7fa96b4d665dcac3df5846c4519dae22664f5ef7dc9b82"} Oct 11 04:40:41 crc kubenswrapper[4798]: I1011 04:40:41.606554 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-228dw" event={"ID":"e72d835c-5b5a-4c3c-9416-16f3c715c0f4","Type":"ContainerStarted","Data":"f42ac77901dfac804314242cfcd216902b8080386116a1002decfe90a0a7bdd4"} Oct 11 04:40:41 crc kubenswrapper[4798]: I1011 04:40:41.613561 4798 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 04:40:43 crc kubenswrapper[4798]: I1011 04:40:43.632421 4798 generic.go:334] "Generic (PLEG): container finished" podID="e72d835c-5b5a-4c3c-9416-16f3c715c0f4" containerID="ecd63a8661c89e6e52b835421b4d3f51cc83424a6d1135fcdf6172e211ebf82d" exitCode=0 Oct 11 04:40:43 crc kubenswrapper[4798]: I1011 04:40:43.632527 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-228dw" event={"ID":"e72d835c-5b5a-4c3c-9416-16f3c715c0f4","Type":"ContainerDied","Data":"ecd63a8661c89e6e52b835421b4d3f51cc83424a6d1135fcdf6172e211ebf82d"} Oct 11 04:40:44 crc kubenswrapper[4798]: I1011 04:40:44.654901 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-228dw" event={"ID":"e72d835c-5b5a-4c3c-9416-16f3c715c0f4","Type":"ContainerStarted","Data":"9586810b65846db93458abb704205245aeb0edf4aa7719a44a6f516e0015d509"} Oct 11 04:40:44 crc kubenswrapper[4798]: I1011 04:40:44.706232 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-228dw" podStartSLOduration=2.204541519 podStartE2EDuration="4.705993003s" podCreationTimestamp="2025-10-11 04:40:40 +0000 UTC" firstStartedPulling="2025-10-11 04:40:41.61325822 +0000 UTC m=+2736.949547896" lastFinishedPulling="2025-10-11 04:40:44.114709694 +0000 UTC m=+2739.450999380" observedRunningTime="2025-10-11 04:40:44.693054349 +0000 UTC m=+2740.029344055" watchObservedRunningTime="2025-10-11 04:40:44.705993003 +0000 UTC m=+2740.042282689" Oct 11 04:40:50 crc kubenswrapper[4798]: I1011 04:40:50.745278 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:50 crc kubenswrapper[4798]: I1011 04:40:50.746528 4798 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:50 crc kubenswrapper[4798]: I1011 04:40:50.829005 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:51 crc kubenswrapper[4798]: I1011 04:40:51.797979 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:51 crc kubenswrapper[4798]: I1011 04:40:51.876529 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-228dw"] Oct 11 04:40:53 crc kubenswrapper[4798]: I1011 04:40:53.760665 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-228dw" podUID="e72d835c-5b5a-4c3c-9416-16f3c715c0f4" containerName="registry-server" containerID="cri-o://9586810b65846db93458abb704205245aeb0edf4aa7719a44a6f516e0015d509" gracePeriod=2 Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.311681 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.352889 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-utilities\") pod \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\" (UID: \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\") " Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.353146 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-catalog-content\") pod \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\" (UID: \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\") " Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.353275 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmbkr\" (UniqueName: \"kubernetes.io/projected/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-kube-api-access-dmbkr\") pod \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\" (UID: \"e72d835c-5b5a-4c3c-9416-16f3c715c0f4\") " Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.355924 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-utilities" (OuterVolumeSpecName: "utilities") pod "e72d835c-5b5a-4c3c-9416-16f3c715c0f4" (UID: "e72d835c-5b5a-4c3c-9416-16f3c715c0f4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.366935 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-kube-api-access-dmbkr" (OuterVolumeSpecName: "kube-api-access-dmbkr") pod "e72d835c-5b5a-4c3c-9416-16f3c715c0f4" (UID: "e72d835c-5b5a-4c3c-9416-16f3c715c0f4"). InnerVolumeSpecName "kube-api-access-dmbkr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.431151 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e72d835c-5b5a-4c3c-9416-16f3c715c0f4" (UID: "e72d835c-5b5a-4c3c-9416-16f3c715c0f4"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.455348 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.455376 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.455541 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmbkr\" (UniqueName: \"kubernetes.io/projected/e72d835c-5b5a-4c3c-9416-16f3c715c0f4-kube-api-access-dmbkr\") on node \"crc\" DevicePath \"\"" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.776680 4798 generic.go:334] "Generic (PLEG): container finished" podID="e72d835c-5b5a-4c3c-9416-16f3c715c0f4" containerID="9586810b65846db93458abb704205245aeb0edf4aa7719a44a6f516e0015d509" exitCode=0 Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.776746 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-228dw" event={"ID":"e72d835c-5b5a-4c3c-9416-16f3c715c0f4","Type":"ContainerDied","Data":"9586810b65846db93458abb704205245aeb0edf4aa7719a44a6f516e0015d509"} Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.776872 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-228dw" event={"ID":"e72d835c-5b5a-4c3c-9416-16f3c715c0f4","Type":"ContainerDied","Data":"f42ac77901dfac804314242cfcd216902b8080386116a1002decfe90a0a7bdd4"} Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.776863 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-228dw" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.776902 4798 scope.go:117] "RemoveContainer" containerID="9586810b65846db93458abb704205245aeb0edf4aa7719a44a6f516e0015d509" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.824295 4798 scope.go:117] "RemoveContainer" containerID="ecd63a8661c89e6e52b835421b4d3f51cc83424a6d1135fcdf6172e211ebf82d" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.843051 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-228dw"] Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.855679 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-228dw"] Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.870753 4798 scope.go:117] "RemoveContainer" containerID="006cafe3c1d6e5c88e7fa96b4d665dcac3df5846c4519dae22664f5ef7dc9b82" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.932553 4798 scope.go:117] "RemoveContainer" containerID="9586810b65846db93458abb704205245aeb0edf4aa7719a44a6f516e0015d509" Oct 11 04:40:54 crc kubenswrapper[4798]: E1011 04:40:54.933084 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9586810b65846db93458abb704205245aeb0edf4aa7719a44a6f516e0015d509\": container with ID starting with 9586810b65846db93458abb704205245aeb0edf4aa7719a44a6f516e0015d509 not found: ID does not exist" containerID="9586810b65846db93458abb704205245aeb0edf4aa7719a44a6f516e0015d509" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.933124 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9586810b65846db93458abb704205245aeb0edf4aa7719a44a6f516e0015d509"} err="failed to get container status \"9586810b65846db93458abb704205245aeb0edf4aa7719a44a6f516e0015d509\": rpc error: code = NotFound desc = could not find container \"9586810b65846db93458abb704205245aeb0edf4aa7719a44a6f516e0015d509\": container with ID starting with 9586810b65846db93458abb704205245aeb0edf4aa7719a44a6f516e0015d509 not found: ID does not exist" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.933152 4798 scope.go:117] "RemoveContainer" containerID="ecd63a8661c89e6e52b835421b4d3f51cc83424a6d1135fcdf6172e211ebf82d" Oct 11 04:40:54 crc kubenswrapper[4798]: E1011 04:40:54.933792 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecd63a8661c89e6e52b835421b4d3f51cc83424a6d1135fcdf6172e211ebf82d\": container with ID starting with ecd63a8661c89e6e52b835421b4d3f51cc83424a6d1135fcdf6172e211ebf82d not found: ID does not exist" containerID="ecd63a8661c89e6e52b835421b4d3f51cc83424a6d1135fcdf6172e211ebf82d" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.933836 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecd63a8661c89e6e52b835421b4d3f51cc83424a6d1135fcdf6172e211ebf82d"} err="failed to get container status \"ecd63a8661c89e6e52b835421b4d3f51cc83424a6d1135fcdf6172e211ebf82d\": rpc error: code = NotFound desc = could not find container \"ecd63a8661c89e6e52b835421b4d3f51cc83424a6d1135fcdf6172e211ebf82d\": container with ID starting with ecd63a8661c89e6e52b835421b4d3f51cc83424a6d1135fcdf6172e211ebf82d not found: ID does not exist" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.933865 4798 scope.go:117] "RemoveContainer" 
containerID="006cafe3c1d6e5c88e7fa96b4d665dcac3df5846c4519dae22664f5ef7dc9b82" Oct 11 04:40:54 crc kubenswrapper[4798]: E1011 04:40:54.934229 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"006cafe3c1d6e5c88e7fa96b4d665dcac3df5846c4519dae22664f5ef7dc9b82\": container with ID starting with 006cafe3c1d6e5c88e7fa96b4d665dcac3df5846c4519dae22664f5ef7dc9b82 not found: ID does not exist" containerID="006cafe3c1d6e5c88e7fa96b4d665dcac3df5846c4519dae22664f5ef7dc9b82" Oct 11 04:40:54 crc kubenswrapper[4798]: I1011 04:40:54.934293 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"006cafe3c1d6e5c88e7fa96b4d665dcac3df5846c4519dae22664f5ef7dc9b82"} err="failed to get container status \"006cafe3c1d6e5c88e7fa96b4d665dcac3df5846c4519dae22664f5ef7dc9b82\": rpc error: code = NotFound desc = could not find container \"006cafe3c1d6e5c88e7fa96b4d665dcac3df5846c4519dae22664f5ef7dc9b82\": container with ID starting with 006cafe3c1d6e5c88e7fa96b4d665dcac3df5846c4519dae22664f5ef7dc9b82 not found: ID does not exist" Oct 11 04:40:55 crc kubenswrapper[4798]: I1011 04:40:55.437306 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e72d835c-5b5a-4c3c-9416-16f3c715c0f4" path="/var/lib/kubelet/pods/e72d835c-5b5a-4c3c-9416-16f3c715c0f4/volumes" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.538304 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tvdf8"] Oct 11 04:40:59 crc kubenswrapper[4798]: E1011 04:40:59.540852 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e72d835c-5b5a-4c3c-9416-16f3c715c0f4" containerName="extract-utilities" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.540893 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="e72d835c-5b5a-4c3c-9416-16f3c715c0f4" containerName="extract-utilities" Oct 11 04:40:59 crc kubenswrapper[4798]: E1011 04:40:59.540925 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e72d835c-5b5a-4c3c-9416-16f3c715c0f4" containerName="extract-content" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.540944 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="e72d835c-5b5a-4c3c-9416-16f3c715c0f4" containerName="extract-content" Oct 11 04:40:59 crc kubenswrapper[4798]: E1011 04:40:59.540972 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e72d835c-5b5a-4c3c-9416-16f3c715c0f4" containerName="registry-server" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.540990 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="e72d835c-5b5a-4c3c-9416-16f3c715c0f4" containerName="registry-server" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.541512 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="e72d835c-5b5a-4c3c-9416-16f3c715c0f4" containerName="registry-server" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.545999 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.559995 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tvdf8"] Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.682790 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f754c8f6-7370-4cfb-9435-beb5c46c11b8-catalog-content\") pod \"redhat-operators-tvdf8\" (UID: \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\") " pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.683232 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f754c8f6-7370-4cfb-9435-beb5c46c11b8-utilities\") pod \"redhat-operators-tvdf8\" (UID: \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\") " pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.683476 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbjbk\" (UniqueName: \"kubernetes.io/projected/f754c8f6-7370-4cfb-9435-beb5c46c11b8-kube-api-access-vbjbk\") pod \"redhat-operators-tvdf8\" (UID: \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\") " pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.786689 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f754c8f6-7370-4cfb-9435-beb5c46c11b8-catalog-content\") pod \"redhat-operators-tvdf8\" (UID: \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\") " pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.786752 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f754c8f6-7370-4cfb-9435-beb5c46c11b8-utilities\") pod \"redhat-operators-tvdf8\" (UID: \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\") " pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.786803 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbjbk\" (UniqueName: \"kubernetes.io/projected/f754c8f6-7370-4cfb-9435-beb5c46c11b8-kube-api-access-vbjbk\") pod \"redhat-operators-tvdf8\" (UID: \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\") " pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.787948 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f754c8f6-7370-4cfb-9435-beb5c46c11b8-catalog-content\") pod \"redhat-operators-tvdf8\" (UID: \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\") " pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.788001 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f754c8f6-7370-4cfb-9435-beb5c46c11b8-utilities\") pod \"redhat-operators-tvdf8\" (UID: \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\") " pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.822606 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-vbjbk\" (UniqueName: \"kubernetes.io/projected/f754c8f6-7370-4cfb-9435-beb5c46c11b8-kube-api-access-vbjbk\") pod \"redhat-operators-tvdf8\" (UID: \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\") " pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:40:59 crc kubenswrapper[4798]: I1011 04:40:59.884024 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:41:00 crc kubenswrapper[4798]: I1011 04:41:00.395747 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tvdf8"] Oct 11 04:41:00 crc kubenswrapper[4798]: I1011 04:41:00.849210 4798 generic.go:334] "Generic (PLEG): container finished" podID="f754c8f6-7370-4cfb-9435-beb5c46c11b8" containerID="c49a1638d4a4ef3f7a1f23de54083a3a91c5ab28c84b536658c640e98e41f67e" exitCode=0 Oct 11 04:41:00 crc kubenswrapper[4798]: I1011 04:41:00.849301 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvdf8" event={"ID":"f754c8f6-7370-4cfb-9435-beb5c46c11b8","Type":"ContainerDied","Data":"c49a1638d4a4ef3f7a1f23de54083a3a91c5ab28c84b536658c640e98e41f67e"} Oct 11 04:41:00 crc kubenswrapper[4798]: I1011 04:41:00.849584 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvdf8" event={"ID":"f754c8f6-7370-4cfb-9435-beb5c46c11b8","Type":"ContainerStarted","Data":"5973651943cd3df87c7d3483146a7be5ea38bb2285c07b22d6c3197b7a62a48e"} Oct 11 04:41:02 crc kubenswrapper[4798]: I1011 04:41:02.872287 4798 generic.go:334] "Generic (PLEG): container finished" podID="f754c8f6-7370-4cfb-9435-beb5c46c11b8" containerID="3134cacaeed8e325c49f75516175aaea00c74ed5cd05dcfb6eb237e0f17a088c" exitCode=0 Oct 11 04:41:02 crc kubenswrapper[4798]: I1011 04:41:02.872440 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvdf8" event={"ID":"f754c8f6-7370-4cfb-9435-beb5c46c11b8","Type":"ContainerDied","Data":"3134cacaeed8e325c49f75516175aaea00c74ed5cd05dcfb6eb237e0f17a088c"} Oct 11 04:41:03 crc kubenswrapper[4798]: I1011 04:41:03.887698 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvdf8" event={"ID":"f754c8f6-7370-4cfb-9435-beb5c46c11b8","Type":"ContainerStarted","Data":"f31ed1a40d8e0a41db377c4ca02d558ba3ae16710df57a77d94bf647957bed3b"} Oct 11 04:41:03 crc kubenswrapper[4798]: I1011 04:41:03.932733 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tvdf8" podStartSLOduration=2.510642113 podStartE2EDuration="4.932698353s" podCreationTimestamp="2025-10-11 04:40:59 +0000 UTC" firstStartedPulling="2025-10-11 04:41:00.850978899 +0000 UTC m=+2756.187268585" lastFinishedPulling="2025-10-11 04:41:03.273035119 +0000 UTC m=+2758.609324825" observedRunningTime="2025-10-11 04:41:03.922100844 +0000 UTC m=+2759.258390590" watchObservedRunningTime="2025-10-11 04:41:03.932698353 +0000 UTC m=+2759.268988079" Oct 11 04:41:09 crc kubenswrapper[4798]: I1011 04:41:09.885235 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:41:09 crc kubenswrapper[4798]: I1011 04:41:09.885862 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:41:09 crc kubenswrapper[4798]: I1011 04:41:09.983095 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:41:10 crc kubenswrapper[4798]: I1011 04:41:10.061751 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:41:10 crc kubenswrapper[4798]: I1011 04:41:10.242672 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tvdf8"] Oct 11 04:41:11 crc kubenswrapper[4798]: I1011 04:41:11.977298 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tvdf8" podUID="f754c8f6-7370-4cfb-9435-beb5c46c11b8" containerName="registry-server" containerID="cri-o://f31ed1a40d8e0a41db377c4ca02d558ba3ae16710df57a77d94bf647957bed3b" gracePeriod=2 Oct 11 04:41:12 crc kubenswrapper[4798]: I1011 04:41:12.478230 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:41:12 crc kubenswrapper[4798]: I1011 04:41:12.575346 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f754c8f6-7370-4cfb-9435-beb5c46c11b8-utilities\") pod \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\" (UID: \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\") " Oct 11 04:41:12 crc kubenswrapper[4798]: I1011 04:41:12.575439 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbjbk\" (UniqueName: \"kubernetes.io/projected/f754c8f6-7370-4cfb-9435-beb5c46c11b8-kube-api-access-vbjbk\") pod \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\" (UID: \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\") " Oct 11 04:41:12 crc kubenswrapper[4798]: I1011 04:41:12.575551 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f754c8f6-7370-4cfb-9435-beb5c46c11b8-catalog-content\") pod \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\" (UID: \"f754c8f6-7370-4cfb-9435-beb5c46c11b8\") " Oct 11 04:41:12 crc kubenswrapper[4798]: I1011 04:41:12.577576 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f754c8f6-7370-4cfb-9435-beb5c46c11b8-utilities" (OuterVolumeSpecName: "utilities") pod "f754c8f6-7370-4cfb-9435-beb5c46c11b8" (UID: "f754c8f6-7370-4cfb-9435-beb5c46c11b8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:41:12 crc kubenswrapper[4798]: I1011 04:41:12.585113 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f754c8f6-7370-4cfb-9435-beb5c46c11b8-kube-api-access-vbjbk" (OuterVolumeSpecName: "kube-api-access-vbjbk") pod "f754c8f6-7370-4cfb-9435-beb5c46c11b8" (UID: "f754c8f6-7370-4cfb-9435-beb5c46c11b8"). InnerVolumeSpecName "kube-api-access-vbjbk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:41:12 crc kubenswrapper[4798]: I1011 04:41:12.678310 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f754c8f6-7370-4cfb-9435-beb5c46c11b8-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:41:12 crc kubenswrapper[4798]: I1011 04:41:12.678346 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbjbk\" (UniqueName: \"kubernetes.io/projected/f754c8f6-7370-4cfb-9435-beb5c46c11b8-kube-api-access-vbjbk\") on node \"crc\" DevicePath \"\"" Oct 11 04:41:12 crc kubenswrapper[4798]: I1011 04:41:12.992544 4798 generic.go:334] "Generic (PLEG): container finished" podID="f754c8f6-7370-4cfb-9435-beb5c46c11b8" containerID="f31ed1a40d8e0a41db377c4ca02d558ba3ae16710df57a77d94bf647957bed3b" exitCode=0 Oct 11 04:41:12 crc kubenswrapper[4798]: I1011 04:41:12.992617 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvdf8" event={"ID":"f754c8f6-7370-4cfb-9435-beb5c46c11b8","Type":"ContainerDied","Data":"f31ed1a40d8e0a41db377c4ca02d558ba3ae16710df57a77d94bf647957bed3b"} Oct 11 04:41:12 crc kubenswrapper[4798]: I1011 04:41:12.992659 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvdf8" event={"ID":"f754c8f6-7370-4cfb-9435-beb5c46c11b8","Type":"ContainerDied","Data":"5973651943cd3df87c7d3483146a7be5ea38bb2285c07b22d6c3197b7a62a48e"} Oct 11 04:41:12 crc kubenswrapper[4798]: I1011 04:41:12.992656 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tvdf8" Oct 11 04:41:12 crc kubenswrapper[4798]: I1011 04:41:12.992719 4798 scope.go:117] "RemoveContainer" containerID="f31ed1a40d8e0a41db377c4ca02d558ba3ae16710df57a77d94bf647957bed3b" Oct 11 04:41:13 crc kubenswrapper[4798]: I1011 04:41:13.041865 4798 scope.go:117] "RemoveContainer" containerID="3134cacaeed8e325c49f75516175aaea00c74ed5cd05dcfb6eb237e0f17a088c" Oct 11 04:41:13 crc kubenswrapper[4798]: I1011 04:41:13.091908 4798 scope.go:117] "RemoveContainer" containerID="c49a1638d4a4ef3f7a1f23de54083a3a91c5ab28c84b536658c640e98e41f67e" Oct 11 04:41:13 crc kubenswrapper[4798]: I1011 04:41:13.156580 4798 scope.go:117] "RemoveContainer" containerID="f31ed1a40d8e0a41db377c4ca02d558ba3ae16710df57a77d94bf647957bed3b" Oct 11 04:41:13 crc kubenswrapper[4798]: E1011 04:41:13.157368 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f31ed1a40d8e0a41db377c4ca02d558ba3ae16710df57a77d94bf647957bed3b\": container with ID starting with f31ed1a40d8e0a41db377c4ca02d558ba3ae16710df57a77d94bf647957bed3b not found: ID does not exist" containerID="f31ed1a40d8e0a41db377c4ca02d558ba3ae16710df57a77d94bf647957bed3b" Oct 11 04:41:13 crc kubenswrapper[4798]: I1011 04:41:13.157481 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f31ed1a40d8e0a41db377c4ca02d558ba3ae16710df57a77d94bf647957bed3b"} err="failed to get container status \"f31ed1a40d8e0a41db377c4ca02d558ba3ae16710df57a77d94bf647957bed3b\": rpc error: code = NotFound desc = could not find container \"f31ed1a40d8e0a41db377c4ca02d558ba3ae16710df57a77d94bf647957bed3b\": container with ID starting with f31ed1a40d8e0a41db377c4ca02d558ba3ae16710df57a77d94bf647957bed3b not found: ID does not exist" Oct 11 04:41:13 crc kubenswrapper[4798]: I1011 04:41:13.157521 4798 scope.go:117] 
"RemoveContainer" containerID="3134cacaeed8e325c49f75516175aaea00c74ed5cd05dcfb6eb237e0f17a088c" Oct 11 04:41:13 crc kubenswrapper[4798]: E1011 04:41:13.157988 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3134cacaeed8e325c49f75516175aaea00c74ed5cd05dcfb6eb237e0f17a088c\": container with ID starting with 3134cacaeed8e325c49f75516175aaea00c74ed5cd05dcfb6eb237e0f17a088c not found: ID does not exist" containerID="3134cacaeed8e325c49f75516175aaea00c74ed5cd05dcfb6eb237e0f17a088c" Oct 11 04:41:13 crc kubenswrapper[4798]: I1011 04:41:13.158031 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3134cacaeed8e325c49f75516175aaea00c74ed5cd05dcfb6eb237e0f17a088c"} err="failed to get container status \"3134cacaeed8e325c49f75516175aaea00c74ed5cd05dcfb6eb237e0f17a088c\": rpc error: code = NotFound desc = could not find container \"3134cacaeed8e325c49f75516175aaea00c74ed5cd05dcfb6eb237e0f17a088c\": container with ID starting with 3134cacaeed8e325c49f75516175aaea00c74ed5cd05dcfb6eb237e0f17a088c not found: ID does not exist" Oct 11 04:41:13 crc kubenswrapper[4798]: I1011 04:41:13.158061 4798 scope.go:117] "RemoveContainer" containerID="c49a1638d4a4ef3f7a1f23de54083a3a91c5ab28c84b536658c640e98e41f67e" Oct 11 04:41:13 crc kubenswrapper[4798]: E1011 04:41:13.158583 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c49a1638d4a4ef3f7a1f23de54083a3a91c5ab28c84b536658c640e98e41f67e\": container with ID starting with c49a1638d4a4ef3f7a1f23de54083a3a91c5ab28c84b536658c640e98e41f67e not found: ID does not exist" containerID="c49a1638d4a4ef3f7a1f23de54083a3a91c5ab28c84b536658c640e98e41f67e" Oct 11 04:41:13 crc kubenswrapper[4798]: I1011 04:41:13.158636 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c49a1638d4a4ef3f7a1f23de54083a3a91c5ab28c84b536658c640e98e41f67e"} err="failed to get container status \"c49a1638d4a4ef3f7a1f23de54083a3a91c5ab28c84b536658c640e98e41f67e\": rpc error: code = NotFound desc = could not find container \"c49a1638d4a4ef3f7a1f23de54083a3a91c5ab28c84b536658c640e98e41f67e\": container with ID starting with c49a1638d4a4ef3f7a1f23de54083a3a91c5ab28c84b536658c640e98e41f67e not found: ID does not exist" Oct 11 04:41:13 crc kubenswrapper[4798]: I1011 04:41:13.171567 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f754c8f6-7370-4cfb-9435-beb5c46c11b8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f754c8f6-7370-4cfb-9435-beb5c46c11b8" (UID: "f754c8f6-7370-4cfb-9435-beb5c46c11b8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:41:13 crc kubenswrapper[4798]: I1011 04:41:13.193492 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f754c8f6-7370-4cfb-9435-beb5c46c11b8-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:41:13 crc kubenswrapper[4798]: I1011 04:41:13.354899 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tvdf8"] Oct 11 04:41:13 crc kubenswrapper[4798]: I1011 04:41:13.368479 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tvdf8"] Oct 11 04:41:13 crc kubenswrapper[4798]: I1011 04:41:13.455102 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f754c8f6-7370-4cfb-9435-beb5c46c11b8" path="/var/lib/kubelet/pods/f754c8f6-7370-4cfb-9435-beb5c46c11b8/volumes" Oct 11 04:41:27 crc kubenswrapper[4798]: I1011 04:41:27.139127 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:41:27 crc kubenswrapper[4798]: I1011 04:41:27.139932 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:41:57 crc kubenswrapper[4798]: I1011 04:41:57.138881 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:41:57 crc kubenswrapper[4798]: I1011 04:41:57.139628 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:42:08 crc kubenswrapper[4798]: I1011 04:42:08.783557 4798 generic.go:334] "Generic (PLEG): container finished" podID="8550f028-bbd9-455c-9c93-0e813f2a95ed" containerID="96c7ca14409540dac953477baee2bab81c7f3f91fe56dc813e366ef2a1148c48" exitCode=0 Oct 11 04:42:08 crc kubenswrapper[4798]: I1011 04:42:08.783616 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" event={"ID":"8550f028-bbd9-455c-9c93-0e813f2a95ed","Type":"ContainerDied","Data":"96c7ca14409540dac953477baee2bab81c7f3f91fe56dc813e366ef2a1148c48"} Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.386085 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.559889 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-ceph\") pod \"8550f028-bbd9-455c-9c93-0e813f2a95ed\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.559960 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-libvirt-combined-ca-bundle\") pod \"8550f028-bbd9-455c-9c93-0e813f2a95ed\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.560139 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-ssh-key\") pod \"8550f028-bbd9-455c-9c93-0e813f2a95ed\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.560262 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ngl8\" (UniqueName: \"kubernetes.io/projected/8550f028-bbd9-455c-9c93-0e813f2a95ed-kube-api-access-6ngl8\") pod \"8550f028-bbd9-455c-9c93-0e813f2a95ed\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.560285 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-libvirt-secret-0\") pod \"8550f028-bbd9-455c-9c93-0e813f2a95ed\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.560364 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-inventory\") pod \"8550f028-bbd9-455c-9c93-0e813f2a95ed\" (UID: \"8550f028-bbd9-455c-9c93-0e813f2a95ed\") " Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.570040 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8550f028-bbd9-455c-9c93-0e813f2a95ed-kube-api-access-6ngl8" (OuterVolumeSpecName: "kube-api-access-6ngl8") pod "8550f028-bbd9-455c-9c93-0e813f2a95ed" (UID: "8550f028-bbd9-455c-9c93-0e813f2a95ed"). InnerVolumeSpecName "kube-api-access-6ngl8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.571791 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-ceph" (OuterVolumeSpecName: "ceph") pod "8550f028-bbd9-455c-9c93-0e813f2a95ed" (UID: "8550f028-bbd9-455c-9c93-0e813f2a95ed"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.584092 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "8550f028-bbd9-455c-9c93-0e813f2a95ed" (UID: "8550f028-bbd9-455c-9c93-0e813f2a95ed"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.600621 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8550f028-bbd9-455c-9c93-0e813f2a95ed" (UID: "8550f028-bbd9-455c-9c93-0e813f2a95ed"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.604785 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "8550f028-bbd9-455c-9c93-0e813f2a95ed" (UID: "8550f028-bbd9-455c-9c93-0e813f2a95ed"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.606978 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-inventory" (OuterVolumeSpecName: "inventory") pod "8550f028-bbd9-455c-9c93-0e813f2a95ed" (UID: "8550f028-bbd9-455c-9c93-0e813f2a95ed"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.662724 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.662760 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.662772 4798 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.662785 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.662798 4798 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/8550f028-bbd9-455c-9c93-0e813f2a95ed-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.662812 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ngl8\" (UniqueName: \"kubernetes.io/projected/8550f028-bbd9-455c-9c93-0e813f2a95ed-kube-api-access-6ngl8\") on node \"crc\" DevicePath \"\"" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.821488 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" event={"ID":"8550f028-bbd9-455c-9c93-0e813f2a95ed","Type":"ContainerDied","Data":"def858fe9c3019c87ce9083256d45903bebf258278cd7bbbc07362804d8070d4"} Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.821937 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="def858fe9c3019c87ce9083256d45903bebf258278cd7bbbc07362804d8070d4" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.821572 4798 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-649lj" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.944903 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572"] Oct 11 04:42:10 crc kubenswrapper[4798]: E1011 04:42:10.945466 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f754c8f6-7370-4cfb-9435-beb5c46c11b8" containerName="extract-utilities" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.945486 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="f754c8f6-7370-4cfb-9435-beb5c46c11b8" containerName="extract-utilities" Oct 11 04:42:10 crc kubenswrapper[4798]: E1011 04:42:10.945526 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f754c8f6-7370-4cfb-9435-beb5c46c11b8" containerName="extract-content" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.945533 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="f754c8f6-7370-4cfb-9435-beb5c46c11b8" containerName="extract-content" Oct 11 04:42:10 crc kubenswrapper[4798]: E1011 04:42:10.945547 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f754c8f6-7370-4cfb-9435-beb5c46c11b8" containerName="registry-server" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.945556 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="f754c8f6-7370-4cfb-9435-beb5c46c11b8" containerName="registry-server" Oct 11 04:42:10 crc kubenswrapper[4798]: E1011 04:42:10.945567 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8550f028-bbd9-455c-9c93-0e813f2a95ed" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.945575 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="8550f028-bbd9-455c-9c93-0e813f2a95ed" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.945799 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="8550f028-bbd9-455c-9c93-0e813f2a95ed" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.945816 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="f754c8f6-7370-4cfb-9435-beb5c46c11b8" containerName="registry-server" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.946719 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.950931 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.950951 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ceph-nova" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.950981 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mgsh6" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.951149 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.952954 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.953047 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.953172 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.954133 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.954275 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 11 04:42:10 crc kubenswrapper[4798]: I1011 04:42:10.967891 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572"] Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.076588 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.076633 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.076687 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.076715 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ceph-nova-0\") pod 
\"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.076737 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.077041 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d84r5\" (UniqueName: \"kubernetes.io/projected/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-kube-api-access-d84r5\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.077278 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.077323 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.077366 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.077721 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.077826 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc 
kubenswrapper[4798]: I1011 04:42:11.180603 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.180673 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.180736 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.180769 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.180797 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.180852 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d84r5\" (UniqueName: \"kubernetes.io/projected/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-kube-api-access-d84r5\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.180912 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.180936 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: 
\"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.180962 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.181033 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.181064 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.183012 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.183021 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.188692 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.189369 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.189624 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-cell1-compute-config-1\") 
pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.189744 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.191091 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.191988 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.193511 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.195519 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.217061 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d84r5\" (UniqueName: \"kubernetes.io/projected/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-kube-api-access-d84r5\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.273348 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.768507 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572"] Oct 11 04:42:11 crc kubenswrapper[4798]: I1011 04:42:11.833589 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" event={"ID":"1a79dbc6-3c66-4728-8984-fecebe3eb6f6","Type":"ContainerStarted","Data":"d3b5ec3182541623fe2bc58f72ff0faf85abc9241c6c6f576f59bb6edbb9c46a"} Oct 11 04:42:12 crc kubenswrapper[4798]: I1011 04:42:12.843158 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" event={"ID":"1a79dbc6-3c66-4728-8984-fecebe3eb6f6","Type":"ContainerStarted","Data":"d4b8cfe8121edf22284ee8438ddf52b2109886786d67a64c05ffb79bc70d467b"} Oct 11 04:42:12 crc kubenswrapper[4798]: I1011 04:42:12.873990 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" podStartSLOduration=2.470388561 podStartE2EDuration="2.8739604s" podCreationTimestamp="2025-10-11 04:42:10 +0000 UTC" firstStartedPulling="2025-10-11 04:42:11.783292952 +0000 UTC m=+2827.119582648" lastFinishedPulling="2025-10-11 04:42:12.186864771 +0000 UTC m=+2827.523154487" observedRunningTime="2025-10-11 04:42:12.866101684 +0000 UTC m=+2828.202391370" watchObservedRunningTime="2025-10-11 04:42:12.8739604 +0000 UTC m=+2828.210250086" Oct 11 04:42:13 crc kubenswrapper[4798]: I1011 04:42:13.639728 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b4sds"] Oct 11 04:42:13 crc kubenswrapper[4798]: I1011 04:42:13.647792 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:13 crc kubenswrapper[4798]: I1011 04:42:13.652576 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b4sds"] Oct 11 04:42:13 crc kubenswrapper[4798]: I1011 04:42:13.673236 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-utilities\") pod \"community-operators-b4sds\" (UID: \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\") " pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:13 crc kubenswrapper[4798]: I1011 04:42:13.673534 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvcs9\" (UniqueName: \"kubernetes.io/projected/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-kube-api-access-kvcs9\") pod \"community-operators-b4sds\" (UID: \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\") " pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:13 crc kubenswrapper[4798]: I1011 04:42:13.673668 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-catalog-content\") pod \"community-operators-b4sds\" (UID: \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\") " pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:13 crc kubenswrapper[4798]: I1011 04:42:13.777429 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-utilities\") pod \"community-operators-b4sds\" (UID: \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\") " pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:13 crc kubenswrapper[4798]: I1011 04:42:13.777541 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvcs9\" (UniqueName: \"kubernetes.io/projected/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-kube-api-access-kvcs9\") pod \"community-operators-b4sds\" (UID: \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\") " pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:13 crc kubenswrapper[4798]: I1011 04:42:13.777591 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-catalog-content\") pod \"community-operators-b4sds\" (UID: \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\") " pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:13 crc kubenswrapper[4798]: I1011 04:42:13.778302 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-catalog-content\") pod \"community-operators-b4sds\" (UID: \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\") " pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:13 crc kubenswrapper[4798]: I1011 04:42:13.778642 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-utilities\") pod \"community-operators-b4sds\" (UID: \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\") " pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:13 crc kubenswrapper[4798]: I1011 04:42:13.803983 4798 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kvcs9\" (UniqueName: \"kubernetes.io/projected/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-kube-api-access-kvcs9\") pod \"community-operators-b4sds\" (UID: \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\") " pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:13 crc kubenswrapper[4798]: I1011 04:42:13.978520 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:14 crc kubenswrapper[4798]: I1011 04:42:14.514125 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b4sds"] Oct 11 04:42:14 crc kubenswrapper[4798]: I1011 04:42:14.878029 4798 generic.go:334] "Generic (PLEG): container finished" podID="36f68387-6723-4f1c-891f-1fd0f6ee7ef9" containerID="4227995beaa2a4462ac9074f2257379ce0a1075f1177de86359fff907d592a0f" exitCode=0 Oct 11 04:42:14 crc kubenswrapper[4798]: I1011 04:42:14.878124 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b4sds" event={"ID":"36f68387-6723-4f1c-891f-1fd0f6ee7ef9","Type":"ContainerDied","Data":"4227995beaa2a4462ac9074f2257379ce0a1075f1177de86359fff907d592a0f"} Oct 11 04:42:14 crc kubenswrapper[4798]: I1011 04:42:14.878181 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b4sds" event={"ID":"36f68387-6723-4f1c-891f-1fd0f6ee7ef9","Type":"ContainerStarted","Data":"65fddc761f123c560290a877eddb9f054b9bcc0b8c9a2723fdbb9566de15403d"} Oct 11 04:42:15 crc kubenswrapper[4798]: I1011 04:42:15.888868 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b4sds" event={"ID":"36f68387-6723-4f1c-891f-1fd0f6ee7ef9","Type":"ContainerStarted","Data":"8e52e88746575e8dd56a3a135fb2fba45b73304fe1aabd0fb2d5d307e21036d9"} Oct 11 04:42:16 crc kubenswrapper[4798]: I1011 04:42:16.905801 4798 generic.go:334] "Generic (PLEG): container finished" podID="36f68387-6723-4f1c-891f-1fd0f6ee7ef9" containerID="8e52e88746575e8dd56a3a135fb2fba45b73304fe1aabd0fb2d5d307e21036d9" exitCode=0 Oct 11 04:42:16 crc kubenswrapper[4798]: I1011 04:42:16.906168 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b4sds" event={"ID":"36f68387-6723-4f1c-891f-1fd0f6ee7ef9","Type":"ContainerDied","Data":"8e52e88746575e8dd56a3a135fb2fba45b73304fe1aabd0fb2d5d307e21036d9"} Oct 11 04:42:17 crc kubenswrapper[4798]: I1011 04:42:17.920777 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b4sds" event={"ID":"36f68387-6723-4f1c-891f-1fd0f6ee7ef9","Type":"ContainerStarted","Data":"be679a7f86368681aa17d4de8acd38e217951a579ca28e81c7316cbbad4bfeba"} Oct 11 04:42:17 crc kubenswrapper[4798]: I1011 04:42:17.958425 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b4sds" podStartSLOduration=2.344908978 podStartE2EDuration="4.958374323s" podCreationTimestamp="2025-10-11 04:42:13 +0000 UTC" firstStartedPulling="2025-10-11 04:42:14.881944573 +0000 UTC m=+2830.218234299" lastFinishedPulling="2025-10-11 04:42:17.495409958 +0000 UTC m=+2832.831699644" observedRunningTime="2025-10-11 04:42:17.943918803 +0000 UTC m=+2833.280208509" watchObservedRunningTime="2025-10-11 04:42:17.958374323 +0000 UTC m=+2833.294664019" Oct 11 04:42:23 crc kubenswrapper[4798]: I1011 04:42:23.978739 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:23 crc kubenswrapper[4798]: I1011 04:42:23.979378 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:24 crc kubenswrapper[4798]: I1011 04:42:24.052639 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:25 crc kubenswrapper[4798]: I1011 04:42:25.092920 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:25 crc kubenswrapper[4798]: I1011 04:42:25.169074 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b4sds"] Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.021607 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b4sds" podUID="36f68387-6723-4f1c-891f-1fd0f6ee7ef9" containerName="registry-server" containerID="cri-o://be679a7f86368681aa17d4de8acd38e217951a579ca28e81c7316cbbad4bfeba" gracePeriod=2 Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.138285 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.138364 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.138450 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.139257 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.139312 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" gracePeriod=600 Oct 11 04:42:27 crc kubenswrapper[4798]: E1011 04:42:27.289977 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.670647 4798 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.802074 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-utilities\") pod \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\" (UID: \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\") " Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.802158 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvcs9\" (UniqueName: \"kubernetes.io/projected/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-kube-api-access-kvcs9\") pod \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\" (UID: \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\") " Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.802421 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-catalog-content\") pod \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\" (UID: \"36f68387-6723-4f1c-891f-1fd0f6ee7ef9\") " Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.803323 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-utilities" (OuterVolumeSpecName: "utilities") pod "36f68387-6723-4f1c-891f-1fd0f6ee7ef9" (UID: "36f68387-6723-4f1c-891f-1fd0f6ee7ef9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.804718 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.814643 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-kube-api-access-kvcs9" (OuterVolumeSpecName: "kube-api-access-kvcs9") pod "36f68387-6723-4f1c-891f-1fd0f6ee7ef9" (UID: "36f68387-6723-4f1c-891f-1fd0f6ee7ef9"). InnerVolumeSpecName "kube-api-access-kvcs9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.862473 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "36f68387-6723-4f1c-891f-1fd0f6ee7ef9" (UID: "36f68387-6723-4f1c-891f-1fd0f6ee7ef9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.906818 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvcs9\" (UniqueName: \"kubernetes.io/projected/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-kube-api-access-kvcs9\") on node \"crc\" DevicePath \"\"" Oct 11 04:42:27 crc kubenswrapper[4798]: I1011 04:42:27.906870 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36f68387-6723-4f1c-891f-1fd0f6ee7ef9-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.039577 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" exitCode=0 Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.039719 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f"} Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.039807 4798 scope.go:117] "RemoveContainer" containerID="cfcd870e30121e7648d377b94f1af550547a76a44c2be1c2cfee763d9b8be3df" Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.040812 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:42:28 crc kubenswrapper[4798]: E1011 04:42:28.041445 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.044924 4798 generic.go:334] "Generic (PLEG): container finished" podID="36f68387-6723-4f1c-891f-1fd0f6ee7ef9" containerID="be679a7f86368681aa17d4de8acd38e217951a579ca28e81c7316cbbad4bfeba" exitCode=0 Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.045008 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b4sds" event={"ID":"36f68387-6723-4f1c-891f-1fd0f6ee7ef9","Type":"ContainerDied","Data":"be679a7f86368681aa17d4de8acd38e217951a579ca28e81c7316cbbad4bfeba"} Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.045073 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b4sds" event={"ID":"36f68387-6723-4f1c-891f-1fd0f6ee7ef9","Type":"ContainerDied","Data":"65fddc761f123c560290a877eddb9f054b9bcc0b8c9a2723fdbb9566de15403d"} Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.045012 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b4sds" Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.108635 4798 scope.go:117] "RemoveContainer" containerID="be679a7f86368681aa17d4de8acd38e217951a579ca28e81c7316cbbad4bfeba" Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.118382 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b4sds"] Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.139911 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b4sds"] Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.145466 4798 scope.go:117] "RemoveContainer" containerID="8e52e88746575e8dd56a3a135fb2fba45b73304fe1aabd0fb2d5d307e21036d9" Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.182128 4798 scope.go:117] "RemoveContainer" containerID="4227995beaa2a4462ac9074f2257379ce0a1075f1177de86359fff907d592a0f" Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.206103 4798 scope.go:117] "RemoveContainer" containerID="be679a7f86368681aa17d4de8acd38e217951a579ca28e81c7316cbbad4bfeba" Oct 11 04:42:28 crc kubenswrapper[4798]: E1011 04:42:28.206897 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be679a7f86368681aa17d4de8acd38e217951a579ca28e81c7316cbbad4bfeba\": container with ID starting with be679a7f86368681aa17d4de8acd38e217951a579ca28e81c7316cbbad4bfeba not found: ID does not exist" containerID="be679a7f86368681aa17d4de8acd38e217951a579ca28e81c7316cbbad4bfeba" Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.206975 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be679a7f86368681aa17d4de8acd38e217951a579ca28e81c7316cbbad4bfeba"} err="failed to get container status \"be679a7f86368681aa17d4de8acd38e217951a579ca28e81c7316cbbad4bfeba\": rpc error: code = NotFound desc = could not find container \"be679a7f86368681aa17d4de8acd38e217951a579ca28e81c7316cbbad4bfeba\": container with ID starting with be679a7f86368681aa17d4de8acd38e217951a579ca28e81c7316cbbad4bfeba not found: ID does not exist" Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.207026 4798 scope.go:117] "RemoveContainer" containerID="8e52e88746575e8dd56a3a135fb2fba45b73304fe1aabd0fb2d5d307e21036d9" Oct 11 04:42:28 crc kubenswrapper[4798]: E1011 04:42:28.207594 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e52e88746575e8dd56a3a135fb2fba45b73304fe1aabd0fb2d5d307e21036d9\": container with ID starting with 8e52e88746575e8dd56a3a135fb2fba45b73304fe1aabd0fb2d5d307e21036d9 not found: ID does not exist" containerID="8e52e88746575e8dd56a3a135fb2fba45b73304fe1aabd0fb2d5d307e21036d9" Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.207688 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e52e88746575e8dd56a3a135fb2fba45b73304fe1aabd0fb2d5d307e21036d9"} err="failed to get container status \"8e52e88746575e8dd56a3a135fb2fba45b73304fe1aabd0fb2d5d307e21036d9\": rpc error: code = NotFound desc = could not find container \"8e52e88746575e8dd56a3a135fb2fba45b73304fe1aabd0fb2d5d307e21036d9\": container with ID starting with 8e52e88746575e8dd56a3a135fb2fba45b73304fe1aabd0fb2d5d307e21036d9 not found: ID does not exist" Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.207732 4798 scope.go:117] "RemoveContainer" 
containerID="4227995beaa2a4462ac9074f2257379ce0a1075f1177de86359fff907d592a0f" Oct 11 04:42:28 crc kubenswrapper[4798]: E1011 04:42:28.208455 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4227995beaa2a4462ac9074f2257379ce0a1075f1177de86359fff907d592a0f\": container with ID starting with 4227995beaa2a4462ac9074f2257379ce0a1075f1177de86359fff907d592a0f not found: ID does not exist" containerID="4227995beaa2a4462ac9074f2257379ce0a1075f1177de86359fff907d592a0f" Oct 11 04:42:28 crc kubenswrapper[4798]: I1011 04:42:28.208485 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4227995beaa2a4462ac9074f2257379ce0a1075f1177de86359fff907d592a0f"} err="failed to get container status \"4227995beaa2a4462ac9074f2257379ce0a1075f1177de86359fff907d592a0f\": rpc error: code = NotFound desc = could not find container \"4227995beaa2a4462ac9074f2257379ce0a1075f1177de86359fff907d592a0f\": container with ID starting with 4227995beaa2a4462ac9074f2257379ce0a1075f1177de86359fff907d592a0f not found: ID does not exist" Oct 11 04:42:29 crc kubenswrapper[4798]: I1011 04:42:29.444652 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36f68387-6723-4f1c-891f-1fd0f6ee7ef9" path="/var/lib/kubelet/pods/36f68387-6723-4f1c-891f-1fd0f6ee7ef9/volumes" Oct 11 04:42:41 crc kubenswrapper[4798]: I1011 04:42:41.425362 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:42:41 crc kubenswrapper[4798]: E1011 04:42:41.427224 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.565699 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nwqlg"] Oct 11 04:42:49 crc kubenswrapper[4798]: E1011 04:42:49.568418 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36f68387-6723-4f1c-891f-1fd0f6ee7ef9" containerName="registry-server" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.568527 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="36f68387-6723-4f1c-891f-1fd0f6ee7ef9" containerName="registry-server" Oct 11 04:42:49 crc kubenswrapper[4798]: E1011 04:42:49.568805 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36f68387-6723-4f1c-891f-1fd0f6ee7ef9" containerName="extract-utilities" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.569073 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="36f68387-6723-4f1c-891f-1fd0f6ee7ef9" containerName="extract-utilities" Oct 11 04:42:49 crc kubenswrapper[4798]: E1011 04:42:49.569228 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36f68387-6723-4f1c-891f-1fd0f6ee7ef9" containerName="extract-content" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.569338 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="36f68387-6723-4f1c-891f-1fd0f6ee7ef9" containerName="extract-content" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.569901 4798 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="36f68387-6723-4f1c-891f-1fd0f6ee7ef9" containerName="registry-server" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.573016 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.586802 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwqlg"] Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.663128 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f510a4-1aff-421e-915d-4215163b7d94-utilities\") pod \"redhat-marketplace-nwqlg\" (UID: \"94f510a4-1aff-421e-915d-4215163b7d94\") " pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.663243 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f510a4-1aff-421e-915d-4215163b7d94-catalog-content\") pod \"redhat-marketplace-nwqlg\" (UID: \"94f510a4-1aff-421e-915d-4215163b7d94\") " pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.663289 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c9d6\" (UniqueName: \"kubernetes.io/projected/94f510a4-1aff-421e-915d-4215163b7d94-kube-api-access-9c9d6\") pod \"redhat-marketplace-nwqlg\" (UID: \"94f510a4-1aff-421e-915d-4215163b7d94\") " pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.765296 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f510a4-1aff-421e-915d-4215163b7d94-utilities\") pod \"redhat-marketplace-nwqlg\" (UID: \"94f510a4-1aff-421e-915d-4215163b7d94\") " pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.765387 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f510a4-1aff-421e-915d-4215163b7d94-catalog-content\") pod \"redhat-marketplace-nwqlg\" (UID: \"94f510a4-1aff-421e-915d-4215163b7d94\") " pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.765441 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c9d6\" (UniqueName: \"kubernetes.io/projected/94f510a4-1aff-421e-915d-4215163b7d94-kube-api-access-9c9d6\") pod \"redhat-marketplace-nwqlg\" (UID: \"94f510a4-1aff-421e-915d-4215163b7d94\") " pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.766672 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f510a4-1aff-421e-915d-4215163b7d94-utilities\") pod \"redhat-marketplace-nwqlg\" (UID: \"94f510a4-1aff-421e-915d-4215163b7d94\") " pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.768338 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f510a4-1aff-421e-915d-4215163b7d94-catalog-content\") pod \"redhat-marketplace-nwqlg\" (UID: 
\"94f510a4-1aff-421e-915d-4215163b7d94\") " pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.807904 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c9d6\" (UniqueName: \"kubernetes.io/projected/94f510a4-1aff-421e-915d-4215163b7d94-kube-api-access-9c9d6\") pod \"redhat-marketplace-nwqlg\" (UID: \"94f510a4-1aff-421e-915d-4215163b7d94\") " pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:42:49 crc kubenswrapper[4798]: I1011 04:42:49.915782 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:42:50 crc kubenswrapper[4798]: I1011 04:42:50.445415 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwqlg"] Oct 11 04:42:51 crc kubenswrapper[4798]: I1011 04:42:51.412791 4798 generic.go:334] "Generic (PLEG): container finished" podID="94f510a4-1aff-421e-915d-4215163b7d94" containerID="57194dd641d1ed00d71a80b72bb4bb1d72d7854300530ff9b3139a758b5901e7" exitCode=0 Oct 11 04:42:51 crc kubenswrapper[4798]: I1011 04:42:51.412848 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwqlg" event={"ID":"94f510a4-1aff-421e-915d-4215163b7d94","Type":"ContainerDied","Data":"57194dd641d1ed00d71a80b72bb4bb1d72d7854300530ff9b3139a758b5901e7"} Oct 11 04:42:51 crc kubenswrapper[4798]: I1011 04:42:51.413257 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwqlg" event={"ID":"94f510a4-1aff-421e-915d-4215163b7d94","Type":"ContainerStarted","Data":"6d7bd12e82ac5adfb0cf70bdd81d7e1fd7b39e29ed37169b248c48f15c9fe9c3"} Oct 11 04:42:52 crc kubenswrapper[4798]: I1011 04:42:52.424466 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:42:52 crc kubenswrapper[4798]: E1011 04:42:52.425497 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:42:52 crc kubenswrapper[4798]: I1011 04:42:52.427164 4798 generic.go:334] "Generic (PLEG): container finished" podID="94f510a4-1aff-421e-915d-4215163b7d94" containerID="821d3793547d37ca9e774ebed9dc98b68b879fd57f945cb537084ff118a7a97b" exitCode=0 Oct 11 04:42:52 crc kubenswrapper[4798]: I1011 04:42:52.427213 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwqlg" event={"ID":"94f510a4-1aff-421e-915d-4215163b7d94","Type":"ContainerDied","Data":"821d3793547d37ca9e774ebed9dc98b68b879fd57f945cb537084ff118a7a97b"} Oct 11 04:42:53 crc kubenswrapper[4798]: I1011 04:42:53.444220 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwqlg" event={"ID":"94f510a4-1aff-421e-915d-4215163b7d94","Type":"ContainerStarted","Data":"bb96101203c57a3974ffee88e73693d1c5a99c81f8ce510125bd959dde904858"} Oct 11 04:42:53 crc kubenswrapper[4798]: I1011 04:42:53.483059 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nwqlg" podStartSLOduration=3.030513906 
podStartE2EDuration="4.483033933s" podCreationTimestamp="2025-10-11 04:42:49 +0000 UTC" firstStartedPulling="2025-10-11 04:42:51.414851205 +0000 UTC m=+2866.751140891" lastFinishedPulling="2025-10-11 04:42:52.867371202 +0000 UTC m=+2868.203660918" observedRunningTime="2025-10-11 04:42:53.471782399 +0000 UTC m=+2868.808072105" watchObservedRunningTime="2025-10-11 04:42:53.483033933 +0000 UTC m=+2868.819323629" Oct 11 04:42:59 crc kubenswrapper[4798]: I1011 04:42:59.916285 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:42:59 crc kubenswrapper[4798]: I1011 04:42:59.917418 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:43:00 crc kubenswrapper[4798]: I1011 04:43:00.007205 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:43:00 crc kubenswrapper[4798]: I1011 04:43:00.606793 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:43:00 crc kubenswrapper[4798]: I1011 04:43:00.675134 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwqlg"] Oct 11 04:43:02 crc kubenswrapper[4798]: I1011 04:43:02.560315 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nwqlg" podUID="94f510a4-1aff-421e-915d-4215163b7d94" containerName="registry-server" containerID="cri-o://bb96101203c57a3974ffee88e73693d1c5a99c81f8ce510125bd959dde904858" gracePeriod=2 Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.168708 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.255268 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9c9d6\" (UniqueName: \"kubernetes.io/projected/94f510a4-1aff-421e-915d-4215163b7d94-kube-api-access-9c9d6\") pod \"94f510a4-1aff-421e-915d-4215163b7d94\" (UID: \"94f510a4-1aff-421e-915d-4215163b7d94\") " Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.255843 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f510a4-1aff-421e-915d-4215163b7d94-utilities\") pod \"94f510a4-1aff-421e-915d-4215163b7d94\" (UID: \"94f510a4-1aff-421e-915d-4215163b7d94\") " Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.255984 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f510a4-1aff-421e-915d-4215163b7d94-catalog-content\") pod \"94f510a4-1aff-421e-915d-4215163b7d94\" (UID: \"94f510a4-1aff-421e-915d-4215163b7d94\") " Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.257540 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94f510a4-1aff-421e-915d-4215163b7d94-utilities" (OuterVolumeSpecName: "utilities") pod "94f510a4-1aff-421e-915d-4215163b7d94" (UID: "94f510a4-1aff-421e-915d-4215163b7d94"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.265285 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94f510a4-1aff-421e-915d-4215163b7d94-kube-api-access-9c9d6" (OuterVolumeSpecName: "kube-api-access-9c9d6") pod "94f510a4-1aff-421e-915d-4215163b7d94" (UID: "94f510a4-1aff-421e-915d-4215163b7d94"). InnerVolumeSpecName "kube-api-access-9c9d6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.277925 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94f510a4-1aff-421e-915d-4215163b7d94-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "94f510a4-1aff-421e-915d-4215163b7d94" (UID: "94f510a4-1aff-421e-915d-4215163b7d94"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.358971 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f510a4-1aff-421e-915d-4215163b7d94-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.359062 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f510a4-1aff-421e-915d-4215163b7d94-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.359081 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9c9d6\" (UniqueName: \"kubernetes.io/projected/94f510a4-1aff-421e-915d-4215163b7d94-kube-api-access-9c9d6\") on node \"crc\" DevicePath \"\"" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.588134 4798 generic.go:334] "Generic (PLEG): container finished" podID="94f510a4-1aff-421e-915d-4215163b7d94" containerID="bb96101203c57a3974ffee88e73693d1c5a99c81f8ce510125bd959dde904858" exitCode=0 Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.588200 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwqlg" event={"ID":"94f510a4-1aff-421e-915d-4215163b7d94","Type":"ContainerDied","Data":"bb96101203c57a3974ffee88e73693d1c5a99c81f8ce510125bd959dde904858"} Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.588234 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nwqlg" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.588258 4798 scope.go:117] "RemoveContainer" containerID="bb96101203c57a3974ffee88e73693d1c5a99c81f8ce510125bd959dde904858" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.588242 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwqlg" event={"ID":"94f510a4-1aff-421e-915d-4215163b7d94","Type":"ContainerDied","Data":"6d7bd12e82ac5adfb0cf70bdd81d7e1fd7b39e29ed37169b248c48f15c9fe9c3"} Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.620989 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwqlg"] Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.631680 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwqlg"] Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.636957 4798 scope.go:117] "RemoveContainer" containerID="821d3793547d37ca9e774ebed9dc98b68b879fd57f945cb537084ff118a7a97b" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.668484 4798 scope.go:117] "RemoveContainer" containerID="57194dd641d1ed00d71a80b72bb4bb1d72d7854300530ff9b3139a758b5901e7" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.728703 4798 scope.go:117] "RemoveContainer" containerID="bb96101203c57a3974ffee88e73693d1c5a99c81f8ce510125bd959dde904858" Oct 11 04:43:03 crc kubenswrapper[4798]: E1011 04:43:03.729382 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb96101203c57a3974ffee88e73693d1c5a99c81f8ce510125bd959dde904858\": container with ID starting with bb96101203c57a3974ffee88e73693d1c5a99c81f8ce510125bd959dde904858 not found: ID does not exist" containerID="bb96101203c57a3974ffee88e73693d1c5a99c81f8ce510125bd959dde904858" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.729457 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb96101203c57a3974ffee88e73693d1c5a99c81f8ce510125bd959dde904858"} err="failed to get container status \"bb96101203c57a3974ffee88e73693d1c5a99c81f8ce510125bd959dde904858\": rpc error: code = NotFound desc = could not find container \"bb96101203c57a3974ffee88e73693d1c5a99c81f8ce510125bd959dde904858\": container with ID starting with bb96101203c57a3974ffee88e73693d1c5a99c81f8ce510125bd959dde904858 not found: ID does not exist" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.729490 4798 scope.go:117] "RemoveContainer" containerID="821d3793547d37ca9e774ebed9dc98b68b879fd57f945cb537084ff118a7a97b" Oct 11 04:43:03 crc kubenswrapper[4798]: E1011 04:43:03.730492 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"821d3793547d37ca9e774ebed9dc98b68b879fd57f945cb537084ff118a7a97b\": container with ID starting with 821d3793547d37ca9e774ebed9dc98b68b879fd57f945cb537084ff118a7a97b not found: ID does not exist" containerID="821d3793547d37ca9e774ebed9dc98b68b879fd57f945cb537084ff118a7a97b" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.730584 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"821d3793547d37ca9e774ebed9dc98b68b879fd57f945cb537084ff118a7a97b"} err="failed to get container status \"821d3793547d37ca9e774ebed9dc98b68b879fd57f945cb537084ff118a7a97b\": rpc error: code = NotFound desc = could not find 
container \"821d3793547d37ca9e774ebed9dc98b68b879fd57f945cb537084ff118a7a97b\": container with ID starting with 821d3793547d37ca9e774ebed9dc98b68b879fd57f945cb537084ff118a7a97b not found: ID does not exist" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.730630 4798 scope.go:117] "RemoveContainer" containerID="57194dd641d1ed00d71a80b72bb4bb1d72d7854300530ff9b3139a758b5901e7" Oct 11 04:43:03 crc kubenswrapper[4798]: E1011 04:43:03.731525 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57194dd641d1ed00d71a80b72bb4bb1d72d7854300530ff9b3139a758b5901e7\": container with ID starting with 57194dd641d1ed00d71a80b72bb4bb1d72d7854300530ff9b3139a758b5901e7 not found: ID does not exist" containerID="57194dd641d1ed00d71a80b72bb4bb1d72d7854300530ff9b3139a758b5901e7" Oct 11 04:43:03 crc kubenswrapper[4798]: I1011 04:43:03.731566 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57194dd641d1ed00d71a80b72bb4bb1d72d7854300530ff9b3139a758b5901e7"} err="failed to get container status \"57194dd641d1ed00d71a80b72bb4bb1d72d7854300530ff9b3139a758b5901e7\": rpc error: code = NotFound desc = could not find container \"57194dd641d1ed00d71a80b72bb4bb1d72d7854300530ff9b3139a758b5901e7\": container with ID starting with 57194dd641d1ed00d71a80b72bb4bb1d72d7854300530ff9b3139a758b5901e7 not found: ID does not exist" Oct 11 04:43:05 crc kubenswrapper[4798]: I1011 04:43:05.443890 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94f510a4-1aff-421e-915d-4215163b7d94" path="/var/lib/kubelet/pods/94f510a4-1aff-421e-915d-4215163b7d94/volumes" Oct 11 04:43:06 crc kubenswrapper[4798]: I1011 04:43:06.424561 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:43:06 crc kubenswrapper[4798]: E1011 04:43:06.425045 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:43:17 crc kubenswrapper[4798]: I1011 04:43:17.434039 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:43:17 crc kubenswrapper[4798]: E1011 04:43:17.436320 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:43:32 crc kubenswrapper[4798]: I1011 04:43:32.424566 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:43:32 crc kubenswrapper[4798]: E1011 04:43:32.426066 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:43:43 crc kubenswrapper[4798]: I1011 04:43:43.426812 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:43:43 crc kubenswrapper[4798]: E1011 04:43:43.428051 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:43:58 crc kubenswrapper[4798]: I1011 04:43:58.424583 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:43:58 crc kubenswrapper[4798]: E1011 04:43:58.425714 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:44:10 crc kubenswrapper[4798]: I1011 04:44:10.425188 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:44:10 crc kubenswrapper[4798]: E1011 04:44:10.426277 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:44:22 crc kubenswrapper[4798]: I1011 04:44:22.424548 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:44:22 crc kubenswrapper[4798]: E1011 04:44:22.425961 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:44:37 crc kubenswrapper[4798]: I1011 04:44:37.424145 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:44:37 crc kubenswrapper[4798]: E1011 04:44:37.427169 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" 
podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:44:51 crc kubenswrapper[4798]: I1011 04:44:51.424126 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:44:51 crc kubenswrapper[4798]: E1011 04:44:51.425050 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.204965 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj"] Oct 11 04:45:00 crc kubenswrapper[4798]: E1011 04:45:00.206122 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94f510a4-1aff-421e-915d-4215163b7d94" containerName="registry-server" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.206139 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="94f510a4-1aff-421e-915d-4215163b7d94" containerName="registry-server" Oct 11 04:45:00 crc kubenswrapper[4798]: E1011 04:45:00.206173 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94f510a4-1aff-421e-915d-4215163b7d94" containerName="extract-content" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.206185 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="94f510a4-1aff-421e-915d-4215163b7d94" containerName="extract-content" Oct 11 04:45:00 crc kubenswrapper[4798]: E1011 04:45:00.206218 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94f510a4-1aff-421e-915d-4215163b7d94" containerName="extract-utilities" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.206230 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="94f510a4-1aff-421e-915d-4215163b7d94" containerName="extract-utilities" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.206602 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="94f510a4-1aff-421e-915d-4215163b7d94" containerName="registry-server" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.207329 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.210765 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.217247 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj"] Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.223193 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.246406 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33fb0a65-6d07-4944-8c44-f851b2301bf0-secret-volume\") pod \"collect-profiles-29335965-7c6rj\" (UID: \"33fb0a65-6d07-4944-8c44-f851b2301bf0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.246482 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33fb0a65-6d07-4944-8c44-f851b2301bf0-config-volume\") pod \"collect-profiles-29335965-7c6rj\" (UID: \"33fb0a65-6d07-4944-8c44-f851b2301bf0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.246819 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjpsb\" (UniqueName: \"kubernetes.io/projected/33fb0a65-6d07-4944-8c44-f851b2301bf0-kube-api-access-pjpsb\") pod \"collect-profiles-29335965-7c6rj\" (UID: \"33fb0a65-6d07-4944-8c44-f851b2301bf0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.348812 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33fb0a65-6d07-4944-8c44-f851b2301bf0-secret-volume\") pod \"collect-profiles-29335965-7c6rj\" (UID: \"33fb0a65-6d07-4944-8c44-f851b2301bf0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.349261 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33fb0a65-6d07-4944-8c44-f851b2301bf0-config-volume\") pod \"collect-profiles-29335965-7c6rj\" (UID: \"33fb0a65-6d07-4944-8c44-f851b2301bf0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.349516 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjpsb\" (UniqueName: \"kubernetes.io/projected/33fb0a65-6d07-4944-8c44-f851b2301bf0-kube-api-access-pjpsb\") pod \"collect-profiles-29335965-7c6rj\" (UID: \"33fb0a65-6d07-4944-8c44-f851b2301bf0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.351324 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33fb0a65-6d07-4944-8c44-f851b2301bf0-config-volume\") pod 
\"collect-profiles-29335965-7c6rj\" (UID: \"33fb0a65-6d07-4944-8c44-f851b2301bf0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.357077 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33fb0a65-6d07-4944-8c44-f851b2301bf0-secret-volume\") pod \"collect-profiles-29335965-7c6rj\" (UID: \"33fb0a65-6d07-4944-8c44-f851b2301bf0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.371719 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjpsb\" (UniqueName: \"kubernetes.io/projected/33fb0a65-6d07-4944-8c44-f851b2301bf0-kube-api-access-pjpsb\") pod \"collect-profiles-29335965-7c6rj\" (UID: \"33fb0a65-6d07-4944-8c44-f851b2301bf0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" Oct 11 04:45:00 crc kubenswrapper[4798]: I1011 04:45:00.542490 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" Oct 11 04:45:01 crc kubenswrapper[4798]: I1011 04:45:01.031635 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj"] Oct 11 04:45:02 crc kubenswrapper[4798]: I1011 04:45:02.040599 4798 generic.go:334] "Generic (PLEG): container finished" podID="33fb0a65-6d07-4944-8c44-f851b2301bf0" containerID="e95f5f5d5994c7e5cc7a0613996f38c2b45dcaa40f7464e9bee016e0d5175a42" exitCode=0 Oct 11 04:45:02 crc kubenswrapper[4798]: I1011 04:45:02.041581 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" event={"ID":"33fb0a65-6d07-4944-8c44-f851b2301bf0","Type":"ContainerDied","Data":"e95f5f5d5994c7e5cc7a0613996f38c2b45dcaa40f7464e9bee016e0d5175a42"} Oct 11 04:45:02 crc kubenswrapper[4798]: I1011 04:45:02.041630 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" event={"ID":"33fb0a65-6d07-4944-8c44-f851b2301bf0","Type":"ContainerStarted","Data":"42754fb0d9ac440cede102be8c5a50feff2e145fca01f1b55566e7fab34840e6"} Oct 11 04:45:03 crc kubenswrapper[4798]: I1011 04:45:03.475791 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" Oct 11 04:45:03 crc kubenswrapper[4798]: I1011 04:45:03.514837 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33fb0a65-6d07-4944-8c44-f851b2301bf0-secret-volume\") pod \"33fb0a65-6d07-4944-8c44-f851b2301bf0\" (UID: \"33fb0a65-6d07-4944-8c44-f851b2301bf0\") " Oct 11 04:45:03 crc kubenswrapper[4798]: I1011 04:45:03.515148 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjpsb\" (UniqueName: \"kubernetes.io/projected/33fb0a65-6d07-4944-8c44-f851b2301bf0-kube-api-access-pjpsb\") pod \"33fb0a65-6d07-4944-8c44-f851b2301bf0\" (UID: \"33fb0a65-6d07-4944-8c44-f851b2301bf0\") " Oct 11 04:45:03 crc kubenswrapper[4798]: I1011 04:45:03.515339 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33fb0a65-6d07-4944-8c44-f851b2301bf0-config-volume\") pod \"33fb0a65-6d07-4944-8c44-f851b2301bf0\" (UID: \"33fb0a65-6d07-4944-8c44-f851b2301bf0\") " Oct 11 04:45:03 crc kubenswrapper[4798]: I1011 04:45:03.516561 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33fb0a65-6d07-4944-8c44-f851b2301bf0-config-volume" (OuterVolumeSpecName: "config-volume") pod "33fb0a65-6d07-4944-8c44-f851b2301bf0" (UID: "33fb0a65-6d07-4944-8c44-f851b2301bf0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:45:03 crc kubenswrapper[4798]: I1011 04:45:03.530854 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33fb0a65-6d07-4944-8c44-f851b2301bf0-kube-api-access-pjpsb" (OuterVolumeSpecName: "kube-api-access-pjpsb") pod "33fb0a65-6d07-4944-8c44-f851b2301bf0" (UID: "33fb0a65-6d07-4944-8c44-f851b2301bf0"). InnerVolumeSpecName "kube-api-access-pjpsb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:45:03 crc kubenswrapper[4798]: I1011 04:45:03.538494 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33fb0a65-6d07-4944-8c44-f851b2301bf0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "33fb0a65-6d07-4944-8c44-f851b2301bf0" (UID: "33fb0a65-6d07-4944-8c44-f851b2301bf0"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:45:03 crc kubenswrapper[4798]: I1011 04:45:03.618038 4798 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33fb0a65-6d07-4944-8c44-f851b2301bf0-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 04:45:03 crc kubenswrapper[4798]: I1011 04:45:03.618085 4798 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33fb0a65-6d07-4944-8c44-f851b2301bf0-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 04:45:03 crc kubenswrapper[4798]: I1011 04:45:03.618100 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjpsb\" (UniqueName: \"kubernetes.io/projected/33fb0a65-6d07-4944-8c44-f851b2301bf0-kube-api-access-pjpsb\") on node \"crc\" DevicePath \"\"" Oct 11 04:45:04 crc kubenswrapper[4798]: I1011 04:45:04.069337 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" event={"ID":"33fb0a65-6d07-4944-8c44-f851b2301bf0","Type":"ContainerDied","Data":"42754fb0d9ac440cede102be8c5a50feff2e145fca01f1b55566e7fab34840e6"} Oct 11 04:45:04 crc kubenswrapper[4798]: I1011 04:45:04.070321 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42754fb0d9ac440cede102be8c5a50feff2e145fca01f1b55566e7fab34840e6" Oct 11 04:45:04 crc kubenswrapper[4798]: I1011 04:45:04.069463 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335965-7c6rj" Oct 11 04:45:04 crc kubenswrapper[4798]: I1011 04:45:04.423153 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:45:04 crc kubenswrapper[4798]: E1011 04:45:04.423482 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:45:04 crc kubenswrapper[4798]: I1011 04:45:04.571276 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn"] Oct 11 04:45:04 crc kubenswrapper[4798]: I1011 04:45:04.581834 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335920-q6ndn"] Oct 11 04:45:05 crc kubenswrapper[4798]: I1011 04:45:05.436348 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d53ca2e-470c-4b35-860b-0977c44cd826" path="/var/lib/kubelet/pods/8d53ca2e-470c-4b35-860b-0977c44cd826/volumes" Oct 11 04:45:19 crc kubenswrapper[4798]: I1011 04:45:19.425251 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:45:19 crc kubenswrapper[4798]: E1011 04:45:19.426435 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:45:22 crc kubenswrapper[4798]: I1011 04:45:22.150273 4798 scope.go:117] "RemoveContainer" containerID="f73f3116bbbab7aeeb118d43293a53d67324db76c4e47d2b8dd2a153410da819" Oct 11 04:45:31 crc kubenswrapper[4798]: I1011 04:45:31.425146 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:45:31 crc kubenswrapper[4798]: E1011 04:45:31.426237 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:45:43 crc kubenswrapper[4798]: I1011 04:45:43.425462 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:45:43 crc kubenswrapper[4798]: E1011 04:45:43.427209 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:45:57 crc kubenswrapper[4798]: I1011 04:45:57.424957 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:45:57 crc kubenswrapper[4798]: E1011 04:45:57.425961 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:46:10 crc kubenswrapper[4798]: I1011 04:46:10.423854 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:46:10 crc kubenswrapper[4798]: E1011 04:46:10.424799 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:46:11 crc kubenswrapper[4798]: I1011 04:46:11.873967 4798 generic.go:334] "Generic (PLEG): container finished" podID="1a79dbc6-3c66-4728-8984-fecebe3eb6f6" containerID="d4b8cfe8121edf22284ee8438ddf52b2109886786d67a64c05ffb79bc70d467b" exitCode=0 Oct 11 04:46:11 crc kubenswrapper[4798]: I1011 04:46:11.874558 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" 
event={"ID":"1a79dbc6-3c66-4728-8984-fecebe3eb6f6","Type":"ContainerDied","Data":"d4b8cfe8121edf22284ee8438ddf52b2109886786d67a64c05ffb79bc70d467b"} Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.464253 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.657240 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-cell1-compute-config-1\") pod \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.657343 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-cell1-compute-config-0\") pod \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.657418 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d84r5\" (UniqueName: \"kubernetes.io/projected/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-kube-api-access-d84r5\") pod \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.657443 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ceph-nova-0\") pod \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.657517 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-custom-ceph-combined-ca-bundle\") pod \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.657598 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ceph\") pod \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.657646 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-inventory\") pod \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.657684 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-migration-ssh-key-1\") pod \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.657711 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-migration-ssh-key-0\") pod 
\"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.657753 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ssh-key\") pod \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.657806 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-extra-config-0\") pod \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\" (UID: \"1a79dbc6-3c66-4728-8984-fecebe3eb6f6\") " Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.665425 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ceph" (OuterVolumeSpecName: "ceph") pod "1a79dbc6-3c66-4728-8984-fecebe3eb6f6" (UID: "1a79dbc6-3c66-4728-8984-fecebe3eb6f6"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.668589 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-custom-ceph-combined-ca-bundle" (OuterVolumeSpecName: "nova-custom-ceph-combined-ca-bundle") pod "1a79dbc6-3c66-4728-8984-fecebe3eb6f6" (UID: "1a79dbc6-3c66-4728-8984-fecebe3eb6f6"). InnerVolumeSpecName "nova-custom-ceph-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.668654 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-kube-api-access-d84r5" (OuterVolumeSpecName: "kube-api-access-d84r5") pod "1a79dbc6-3c66-4728-8984-fecebe3eb6f6" (UID: "1a79dbc6-3c66-4728-8984-fecebe3eb6f6"). InnerVolumeSpecName "kube-api-access-d84r5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.689270 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ceph-nova-0" (OuterVolumeSpecName: "ceph-nova-0") pod "1a79dbc6-3c66-4728-8984-fecebe3eb6f6" (UID: "1a79dbc6-3c66-4728-8984-fecebe3eb6f6"). InnerVolumeSpecName "ceph-nova-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.689455 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "1a79dbc6-3c66-4728-8984-fecebe3eb6f6" (UID: "1a79dbc6-3c66-4728-8984-fecebe3eb6f6"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.696456 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "1a79dbc6-3c66-4728-8984-fecebe3eb6f6" (UID: "1a79dbc6-3c66-4728-8984-fecebe3eb6f6"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.699574 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "1a79dbc6-3c66-4728-8984-fecebe3eb6f6" (UID: "1a79dbc6-3c66-4728-8984-fecebe3eb6f6"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.701099 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "1a79dbc6-3c66-4728-8984-fecebe3eb6f6" (UID: "1a79dbc6-3c66-4728-8984-fecebe3eb6f6"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.707899 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1a79dbc6-3c66-4728-8984-fecebe3eb6f6" (UID: "1a79dbc6-3c66-4728-8984-fecebe3eb6f6"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.714708 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-inventory" (OuterVolumeSpecName: "inventory") pod "1a79dbc6-3c66-4728-8984-fecebe3eb6f6" (UID: "1a79dbc6-3c66-4728-8984-fecebe3eb6f6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.715195 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "1a79dbc6-3c66-4728-8984-fecebe3eb6f6" (UID: "1a79dbc6-3c66-4728-8984-fecebe3eb6f6"). InnerVolumeSpecName "nova-cell1-compute-config-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.768268 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.768326 4798 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-inventory\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.768356 4798 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.768371 4798 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.768385 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.768415 4798 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.768434 4798 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.768448 4798 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.768460 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d84r5\" (UniqueName: \"kubernetes.io/projected/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-kube-api-access-d84r5\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.768471 4798 reconciler_common.go:293] "Volume detached for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-ceph-nova-0\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.768487 4798 reconciler_common.go:293] "Volume detached for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a79dbc6-3c66-4728-8984-fecebe3eb6f6-nova-custom-ceph-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.899073 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" event={"ID":"1a79dbc6-3c66-4728-8984-fecebe3eb6f6","Type":"ContainerDied","Data":"d3b5ec3182541623fe2bc58f72ff0faf85abc9241c6c6f576f59bb6edbb9c46a"} Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.899135 4798 pod_container_deletor.go:80] "Container not found in pod's 
containers" containerID="d3b5ec3182541623fe2bc58f72ff0faf85abc9241c6c6f576f59bb6edbb9c46a" Oct 11 04:46:13 crc kubenswrapper[4798]: I1011 04:46:13.899200 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572" Oct 11 04:46:22 crc kubenswrapper[4798]: I1011 04:46:22.424281 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:46:22 crc kubenswrapper[4798]: E1011 04:46:22.425421 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.795903 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"] Oct 11 04:46:28 crc kubenswrapper[4798]: E1011 04:46:28.797243 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a79dbc6-3c66-4728-8984-fecebe3eb6f6" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.797263 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a79dbc6-3c66-4728-8984-fecebe3eb6f6" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Oct 11 04:46:28 crc kubenswrapper[4798]: E1011 04:46:28.797278 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33fb0a65-6d07-4944-8c44-f851b2301bf0" containerName="collect-profiles" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.797284 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="33fb0a65-6d07-4944-8c44-f851b2301bf0" containerName="collect-profiles" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.797489 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a79dbc6-3c66-4728-8984-fecebe3eb6f6" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.797502 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="33fb0a65-6d07-4944-8c44-f851b2301bf0" containerName="collect-profiles" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.798705 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.802239 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.802245 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.809120 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.878016 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.880174 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-backup-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.884432 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.896874 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.944574 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.944648 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.944675 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.944706 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.944732 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szfp8\" (UniqueName: \"kubernetes.io/projected/6d69ada7-5b46-4e22-b304-8ca7957976c5-kube-api-access-szfp8\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.944758 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d69ada7-5b46-4e22-b304-8ca7957976c5-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.944788 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-dev\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.944817 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6d69ada7-5b46-4e22-b304-8ca7957976c5-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: 
I1011 04:46:28.944856 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d69ada7-5b46-4e22-b304-8ca7957976c5-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.944871 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.944912 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.944932 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d69ada7-5b46-4e22-b304-8ca7957976c5-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.944953 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-sys\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.944991 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-run\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.945012 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:28 crc kubenswrapper[4798]: I1011 04:46:28.945031 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d69ada7-5b46-4e22-b304-8ca7957976c5-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.046763 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-dev\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.046821 4798 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlf8j\" (UniqueName: \"kubernetes.io/projected/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-kube-api-access-vlf8j\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.046866 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-etc-nvme\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.046884 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-run\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.046903 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6d69ada7-5b46-4e22-b304-8ca7957976c5-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.046918 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.046953 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-lib-modules\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.046974 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-config-data-custom\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047002 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d69ada7-5b46-4e22-b304-8ca7957976c5-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047017 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047043 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-scripts\") pod 
\"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047060 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-config-data\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047092 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047110 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d69ada7-5b46-4e22-b304-8ca7957976c5-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047128 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047144 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-sys\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047169 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-ceph\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047192 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-run\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047221 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047238 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047258 4798 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d69ada7-5b46-4e22-b304-8ca7957976c5-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047278 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047309 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047326 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047345 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047370 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047411 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-dev\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047428 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szfp8\" (UniqueName: \"kubernetes.io/projected/6d69ada7-5b46-4e22-b304-8ca7957976c5-kube-api-access-szfp8\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047449 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-sys\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047466 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d69ada7-5b46-4e22-b304-8ca7957976c5-config-data-custom\") pod 
\"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047486 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047505 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.047616 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-dev\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.048562 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-run\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.048769 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-sys\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.048800 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.048800 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.048843 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.048881 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.049142 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" 
(UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.051628 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.054815 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/6d69ada7-5b46-4e22-b304-8ca7957976c5-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.056471 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d69ada7-5b46-4e22-b304-8ca7957976c5-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.057960 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d69ada7-5b46-4e22-b304-8ca7957976c5-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.058648 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d69ada7-5b46-4e22-b304-8ca7957976c5-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.063988 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6d69ada7-5b46-4e22-b304-8ca7957976c5-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.064078 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d69ada7-5b46-4e22-b304-8ca7957976c5-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.079042 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szfp8\" (UniqueName: \"kubernetes.io/projected/6d69ada7-5b46-4e22-b304-8ca7957976c5-kube-api-access-szfp8\") pod \"cinder-volume-volume1-0\" (UID: \"6d69ada7-5b46-4e22-b304-8ca7957976c5\") " pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.122179 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.149715 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-dev\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.149776 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-sys\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.149800 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.149824 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.149853 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlf8j\" (UniqueName: \"kubernetes.io/projected/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-kube-api-access-vlf8j\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.149880 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-run\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.149897 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-etc-nvme\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.149912 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.149941 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-lib-modules\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.149960 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-config-data-custom\") pod \"cinder-backup-0\" 
(UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.149990 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-scripts\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.150007 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-config-data\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.150048 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.150068 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-ceph\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.150094 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.150136 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.151475 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.151530 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-dev\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.151540 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.151584 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-etc-machine-id\") pod \"cinder-backup-0\" (UID: 
\"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.151607 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-run\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.151688 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-etc-nvme\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.151566 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-sys\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.153845 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.153842 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.153953 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-lib-modules\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.162112 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.164036 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-config-data-custom\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.175820 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-config-data\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.177419 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-scripts\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 
04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.179960 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlf8j\" (UniqueName: \"kubernetes.io/projected/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-kube-api-access-vlf8j\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.183099 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f9dae869-d1e5-48c4-a08c-5f9e76f8a581-ceph\") pod \"cinder-backup-0\" (UID: \"f9dae869-d1e5-48c4-a08c-5f9e76f8a581\") " pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.199879 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.360706 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-b4ct7"] Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.365973 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-b4ct7" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.373283 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-b4ct7"] Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.461862 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbdv4\" (UniqueName: \"kubernetes.io/projected/b663620b-a5c4-40ff-a611-749eecb67c0a-kube-api-access-hbdv4\") pod \"manila-db-create-b4ct7\" (UID: \"b663620b-a5c4-40ff-a611-749eecb67c0a\") " pod="openstack/manila-db-create-b4ct7" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.521861 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-77dc4c7c69-h9tvx"] Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.524347 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.529571 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.529865 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.530073 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.530236 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-2jwfk" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.555286 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-77dc4c7c69-h9tvx"] Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.566981 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbdv4\" (UniqueName: \"kubernetes.io/projected/b663620b-a5c4-40ff-a611-749eecb67c0a-kube-api-access-hbdv4\") pod \"manila-db-create-b4ct7\" (UID: \"b663620b-a5c4-40ff-a611-749eecb67c0a\") " pod="openstack/manila-db-create-b4ct7" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.596683 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.598592 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.607891 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbdv4\" (UniqueName: \"kubernetes.io/projected/b663620b-a5c4-40ff-a611-749eecb67c0a-kube-api-access-hbdv4\") pod \"manila-db-create-b4ct7\" (UID: \"b663620b-a5c4-40ff-a611-749eecb67c0a\") " pod="openstack/manila-db-create-b4ct7" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.626640 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.626794 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.626877 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-k5vlw" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.627248 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.685879 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/de7b40fd-d975-4605-88eb-fe5e00086535-config-data\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.685991 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de7b40fd-d975-4605-88eb-fe5e00086535-scripts\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.686016 4798 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de7b40fd-d975-4605-88eb-fe5e00086535-logs\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.686147 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sx7r4\" (UniqueName: \"kubernetes.io/projected/de7b40fd-d975-4605-88eb-fe5e00086535-kube-api-access-sx7r4\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.686175 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/de7b40fd-d975-4605-88eb-fe5e00086535-horizon-secret-key\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.699651 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-b4ct7" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.725497 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.770645 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-bf5b8bcbc-wjw4p"] Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.772424 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788382 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-scripts\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788477 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788535 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-logs\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788572 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788589 4798 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788624 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788666 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/de7b40fd-d975-4605-88eb-fe5e00086535-config-data\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788698 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de7b40fd-d975-4605-88eb-fe5e00086535-scripts\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788728 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de7b40fd-d975-4605-88eb-fe5e00086535-logs\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788760 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-ceph\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788787 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sx7r4\" (UniqueName: \"kubernetes.io/projected/de7b40fd-d975-4605-88eb-fe5e00086535-kube-api-access-sx7r4\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788808 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/de7b40fd-d975-4605-88eb-fe5e00086535-horizon-secret-key\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788827 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-config-data\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.788863 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-lz2v8\" (UniqueName: \"kubernetes.io/projected/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-kube-api-access-lz2v8\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.790419 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de7b40fd-d975-4605-88eb-fe5e00086535-scripts\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.791045 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de7b40fd-d975-4605-88eb-fe5e00086535-logs\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.798998 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/de7b40fd-d975-4605-88eb-fe5e00086535-horizon-secret-key\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.812159 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/de7b40fd-d975-4605-88eb-fe5e00086535-config-data\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.848796 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sx7r4\" (UniqueName: \"kubernetes.io/projected/de7b40fd-d975-4605-88eb-fe5e00086535-kube-api-access-sx7r4\") pod \"horizon-77dc4c7c69-h9tvx\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.852825 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-bf5b8bcbc-wjw4p"] Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.866202 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.932469 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz2v8\" (UniqueName: \"kubernetes.io/projected/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-kube-api-access-lz2v8\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.932670 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-scripts\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.932716 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.932749 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/778270e6-b796-4692-b341-fb2091a1576e-scripts\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.932781 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r8cg\" (UniqueName: \"kubernetes.io/projected/778270e6-b796-4692-b341-fb2091a1576e-kube-api-access-9r8cg\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.932853 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-logs\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.932876 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/778270e6-b796-4692-b341-fb2091a1576e-logs\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.932900 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/778270e6-b796-4692-b341-fb2091a1576e-config-data\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.932971 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " 
pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.932997 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.933059 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.933250 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-ceph\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.933335 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-config-data\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.933375 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/778270e6-b796-4692-b341-fb2091a1576e-horizon-secret-key\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.952066 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.959355 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-scripts\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.955673 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.955973 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-logs\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.966427 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-config-data\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " 
pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.952069 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.969115 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.969784 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-ceph\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.971539 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.973663 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.985026 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.985796 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Oct 11 04:46:29 crc kubenswrapper[4798]: I1011 04:46:29.990646 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.016763 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz2v8\" (UniqueName: \"kubernetes.io/projected/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-kube-api-access-lz2v8\") pod \"glance-default-external-api-0\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.039045 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/778270e6-b796-4692-b341-fb2091a1576e-scripts\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.039120 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r8cg\" (UniqueName: \"kubernetes.io/projected/778270e6-b796-4692-b341-fb2091a1576e-kube-api-access-9r8cg\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.039161 4798 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/778270e6-b796-4692-b341-fb2091a1576e-logs\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.039184 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/778270e6-b796-4692-b341-fb2091a1576e-config-data\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.039317 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/778270e6-b796-4692-b341-fb2091a1576e-horizon-secret-key\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.043611 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/778270e6-b796-4692-b341-fb2091a1576e-scripts\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.043899 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/778270e6-b796-4692-b341-fb2091a1576e-logs\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.044256 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/778270e6-b796-4692-b341-fb2091a1576e-horizon-secret-key\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.044856 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/778270e6-b796-4692-b341-fb2091a1576e-config-data\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.069536 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.070231 4798 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.163765 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.163942 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rt4mt\" (UniqueName: \"kubernetes.io/projected/c400462d-db9e-442e-99b7-68cd496770ab-kube-api-access-rt4mt\") pod \"glance-default-internal-api-0\" (UID: 
\"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.164045 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c400462d-db9e-442e-99b7-68cd496770ab-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.164126 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.164213 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c400462d-db9e-442e-99b7-68cd496770ab-ceph\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.164294 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.164360 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c400462d-db9e-442e-99b7-68cd496770ab-logs\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.165358 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.165474 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.184237 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9r8cg\" (UniqueName: \"kubernetes.io/projected/778270e6-b796-4692-b341-fb2091a1576e-kube-api-access-9r8cg\") pod \"horizon-bf5b8bcbc-wjw4p\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.205289 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: 
\"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.211063 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"6d69ada7-5b46-4e22-b304-8ca7957976c5","Type":"ContainerStarted","Data":"493462e9dbd739b6903e3b6d8bc6793841dece903365fb07efbef15437596312"} Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.247686 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.252187 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.276433 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.276507 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c400462d-db9e-442e-99b7-68cd496770ab-ceph\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.276539 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.276556 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c400462d-db9e-442e-99b7-68cd496770ab-logs\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.276593 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.276622 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.276684 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.276709 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-rt4mt\" (UniqueName: \"kubernetes.io/projected/c400462d-db9e-442e-99b7-68cd496770ab-kube-api-access-rt4mt\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.276745 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c400462d-db9e-442e-99b7-68cd496770ab-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.277313 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c400462d-db9e-442e-99b7-68cd496770ab-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.277887 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c400462d-db9e-442e-99b7-68cd496770ab-logs\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.278624 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.284084 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.293819 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.301374 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rt4mt\" (UniqueName: \"kubernetes.io/projected/c400462d-db9e-442e-99b7-68cd496770ab-kube-api-access-rt4mt\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.301453 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.319193 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.319710 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c400462d-db9e-442e-99b7-68cd496770ab-ceph\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.388179 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.478197 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.514302 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-b4ct7"] Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.658291 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:30 crc kubenswrapper[4798]: I1011 04:46:30.770590 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-77dc4c7c69-h9tvx"] Oct 11 04:46:31 crc kubenswrapper[4798]: I1011 04:46:31.017319 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 04:46:31 crc kubenswrapper[4798]: I1011 04:46:31.044216 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-bf5b8bcbc-wjw4p"] Oct 11 04:46:31 crc kubenswrapper[4798]: I1011 04:46:31.246445 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bf5b8bcbc-wjw4p" event={"ID":"778270e6-b796-4692-b341-fb2091a1576e","Type":"ContainerStarted","Data":"70e5667beecebae9f37f265e408448c7cb9c1f0239dfd162f3b76dd881634243"} Oct 11 04:46:31 crc kubenswrapper[4798]: I1011 04:46:31.255018 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"f9dae869-d1e5-48c4-a08c-5f9e76f8a581","Type":"ContainerStarted","Data":"83aecf17f97864535094f2f79d09f090b9124eed565f3fccc1df1032bd2c2e7f"} Oct 11 04:46:31 crc kubenswrapper[4798]: I1011 04:46:31.260053 4798 generic.go:334] "Generic (PLEG): container finished" podID="b663620b-a5c4-40ff-a611-749eecb67c0a" containerID="bd8458670fb406e95377e711b47a821d6b9334460b5e2734a02469762fdb5879" exitCode=0 Oct 11 04:46:31 crc kubenswrapper[4798]: I1011 04:46:31.262080 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-b4ct7" event={"ID":"b663620b-a5c4-40ff-a611-749eecb67c0a","Type":"ContainerDied","Data":"bd8458670fb406e95377e711b47a821d6b9334460b5e2734a02469762fdb5879"} Oct 11 04:46:31 crc kubenswrapper[4798]: I1011 04:46:31.262685 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-b4ct7" event={"ID":"b663620b-a5c4-40ff-a611-749eecb67c0a","Type":"ContainerStarted","Data":"dbff94e482adb64e71bd4fc549f611cb99c73448344e2807ac0ba13b267a15c5"} Oct 11 04:46:31 crc kubenswrapper[4798]: I1011 04:46:31.264439 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-external-api-0" event={"ID":"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b","Type":"ContainerStarted","Data":"1d8d7de75f906222dad66bef75514c2a7d25cac72bd2bb145874782b853ad36f"} Oct 11 04:46:31 crc kubenswrapper[4798]: I1011 04:46:31.288438 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77dc4c7c69-h9tvx" event={"ID":"de7b40fd-d975-4605-88eb-fe5e00086535","Type":"ContainerStarted","Data":"3859a952aa5d31349105180abab7a173acedfebbe88f572b87c8534abff79241"} Oct 11 04:46:31 crc kubenswrapper[4798]: I1011 04:46:31.325095 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 04:46:31 crc kubenswrapper[4798]: W1011 04:46:31.334551 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc400462d_db9e_442e_99b7_68cd496770ab.slice/crio-d4b8614fc753155afe7dabac7635ca519878f12c9db42f4c94fc423a70444a7d WatchSource:0}: Error finding container d4b8614fc753155afe7dabac7635ca519878f12c9db42f4c94fc423a70444a7d: Status 404 returned error can't find the container with id d4b8614fc753155afe7dabac7635ca519878f12c9db42f4c94fc423a70444a7d Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.335758 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"6d69ada7-5b46-4e22-b304-8ca7957976c5","Type":"ContainerStarted","Data":"66e9419c36c54ba0849b99104bbe9fced1e1e7d82205c8f9d125bc7bdfea0a02"} Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.336348 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"6d69ada7-5b46-4e22-b304-8ca7957976c5","Type":"ContainerStarted","Data":"4d0da7a0dfff39b6eaec067f2321fe33cd9c3e8f991a3d2f0416907e08dacdd0"} Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.343281 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b","Type":"ContainerStarted","Data":"f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1"} Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.348022 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"f9dae869-d1e5-48c4-a08c-5f9e76f8a581","Type":"ContainerStarted","Data":"cfc832fd891fd9a71d9443bfde0e90cdb5e6421850dee1f1c638cbcee9c387a3"} Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.348076 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"f9dae869-d1e5-48c4-a08c-5f9e76f8a581","Type":"ContainerStarted","Data":"0af460b74b2b7922eb2f1ae8f8c6fd99538387e86166e677fed555e4066f4394"} Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.350112 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c400462d-db9e-442e-99b7-68cd496770ab","Type":"ContainerStarted","Data":"737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599"} Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.350182 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c400462d-db9e-442e-99b7-68cd496770ab","Type":"ContainerStarted","Data":"d4b8614fc753155afe7dabac7635ca519878f12c9db42f4c94fc423a70444a7d"} Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.394282 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/cinder-volume-volume1-0" podStartSLOduration=3.111898196 podStartE2EDuration="4.394255209s" podCreationTimestamp="2025-10-11 04:46:28 +0000 UTC" firstStartedPulling="2025-10-11 04:46:30.069790874 +0000 UTC m=+3085.406080560" lastFinishedPulling="2025-10-11 04:46:31.352147887 +0000 UTC m=+3086.688437573" observedRunningTime="2025-10-11 04:46:32.371212928 +0000 UTC m=+3087.707502614" watchObservedRunningTime="2025-10-11 04:46:32.394255209 +0000 UTC m=+3087.730544895" Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.404839 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=3.334105473 podStartE2EDuration="4.404809627s" podCreationTimestamp="2025-10-11 04:46:28 +0000 UTC" firstStartedPulling="2025-10-11 04:46:30.282246092 +0000 UTC m=+3085.618535778" lastFinishedPulling="2025-10-11 04:46:31.352950246 +0000 UTC m=+3086.689239932" observedRunningTime="2025-10-11 04:46:32.401639913 +0000 UTC m=+3087.737929589" watchObservedRunningTime="2025-10-11 04:46:32.404809627 +0000 UTC m=+3087.741099313" Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.600426 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-77dc4c7c69-h9tvx"] Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.740660 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.789316 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-78d45886d-tlzfl"] Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.791701 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.800275 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.873886 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-78d45886d-tlzfl"] Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.896817 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-scripts\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.896868 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-combined-ca-bundle\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.896923 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj6fh\" (UniqueName: \"kubernetes.io/projected/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-kube-api-access-tj6fh\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.896946 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-logs\") pod 
\"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.896994 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-horizon-secret-key\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.897017 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-horizon-tls-certs\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.897053 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-config-data\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.902358 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.907603 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-bf5b8bcbc-wjw4p"] Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.916480 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7b5998ffb-nn2rn"] Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.918989 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:32 crc kubenswrapper[4798]: I1011 04:46:32.919326 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7b5998ffb-nn2rn"] Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.000668 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-config-data\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.000733 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjcd7\" (UniqueName: \"kubernetes.io/projected/a3fe5ce5-561f-4775-8024-6c4896079b30-kube-api-access-rjcd7\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.000770 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3fe5ce5-561f-4775-8024-6c4896079b30-combined-ca-bundle\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.000795 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-scripts\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.000818 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a3fe5ce5-561f-4775-8024-6c4896079b30-config-data\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.000843 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-combined-ca-bundle\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.000865 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a3fe5ce5-561f-4775-8024-6c4896079b30-horizon-tls-certs\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.000886 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a3fe5ce5-561f-4775-8024-6c4896079b30-scripts\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.000932 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj6fh\" (UniqueName: 
\"kubernetes.io/projected/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-kube-api-access-tj6fh\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.000961 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-logs\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.000983 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a3fe5ce5-561f-4775-8024-6c4896079b30-logs\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.001014 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a3fe5ce5-561f-4775-8024-6c4896079b30-horizon-secret-key\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.001049 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-horizon-secret-key\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.001073 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-horizon-tls-certs\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.004839 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-config-data\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.005293 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-scripts\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.007208 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-logs\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.025508 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-horizon-secret-key\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " 
pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.042235 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-combined-ca-bundle\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.064226 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-horizon-tls-certs\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.068047 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-b4ct7" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.102904 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbdv4\" (UniqueName: \"kubernetes.io/projected/b663620b-a5c4-40ff-a611-749eecb67c0a-kube-api-access-hbdv4\") pod \"b663620b-a5c4-40ff-a611-749eecb67c0a\" (UID: \"b663620b-a5c4-40ff-a611-749eecb67c0a\") " Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.103167 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3fe5ce5-561f-4775-8024-6c4896079b30-combined-ca-bundle\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.103199 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a3fe5ce5-561f-4775-8024-6c4896079b30-config-data\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.103227 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a3fe5ce5-561f-4775-8024-6c4896079b30-horizon-tls-certs\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.103250 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a3fe5ce5-561f-4775-8024-6c4896079b30-scripts\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.103306 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a3fe5ce5-561f-4775-8024-6c4896079b30-logs\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.103333 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a3fe5ce5-561f-4775-8024-6c4896079b30-horizon-secret-key\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " 
pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.116845 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj6fh\" (UniqueName: \"kubernetes.io/projected/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-kube-api-access-tj6fh\") pod \"horizon-78d45886d-tlzfl\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") " pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.128610 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjcd7\" (UniqueName: \"kubernetes.io/projected/a3fe5ce5-561f-4775-8024-6c4896079b30-kube-api-access-rjcd7\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.132830 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a3fe5ce5-561f-4775-8024-6c4896079b30-config-data\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.134435 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.145176 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a3fe5ce5-561f-4775-8024-6c4896079b30-scripts\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.156637 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b663620b-a5c4-40ff-a611-749eecb67c0a-kube-api-access-hbdv4" (OuterVolumeSpecName: "kube-api-access-hbdv4") pod "b663620b-a5c4-40ff-a611-749eecb67c0a" (UID: "b663620b-a5c4-40ff-a611-749eecb67c0a"). InnerVolumeSpecName "kube-api-access-hbdv4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.158522 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a3fe5ce5-561f-4775-8024-6c4896079b30-logs\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.161975 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a3fe5ce5-561f-4775-8024-6c4896079b30-horizon-tls-certs\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.176928 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a3fe5ce5-561f-4775-8024-6c4896079b30-horizon-secret-key\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.178535 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjcd7\" (UniqueName: \"kubernetes.io/projected/a3fe5ce5-561f-4775-8024-6c4896079b30-kube-api-access-rjcd7\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.244951 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbdv4\" (UniqueName: \"kubernetes.io/projected/b663620b-a5c4-40ff-a611-749eecb67c0a-kube-api-access-hbdv4\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.268102 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3fe5ce5-561f-4775-8024-6c4896079b30-combined-ca-bundle\") pod \"horizon-7b5998ffb-nn2rn\" (UID: \"a3fe5ce5-561f-4775-8024-6c4896079b30\") " pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.298772 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.416517 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-b4ct7" Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.416510 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-b4ct7" event={"ID":"b663620b-a5c4-40ff-a611-749eecb67c0a","Type":"ContainerDied","Data":"dbff94e482adb64e71bd4fc549f611cb99c73448344e2807ac0ba13b267a15c5"} Oct 11 04:46:33 crc kubenswrapper[4798]: I1011 04:46:33.417700 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dbff94e482adb64e71bd4fc549f611cb99c73448344e2807ac0ba13b267a15c5" Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 04:46:34.040002 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7b5998ffb-nn2rn"] Oct 11 04:46:34 crc kubenswrapper[4798]: W1011 04:46:34.070093 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3fe5ce5_561f_4775_8024_6c4896079b30.slice/crio-77778890300fdcfa85745bd5e2fb4115e5f9fc3b8831ca5bc35c4df96aa5e4a3 WatchSource:0}: Error finding container 77778890300fdcfa85745bd5e2fb4115e5f9fc3b8831ca5bc35c4df96aa5e4a3: Status 404 returned error can't find the container with id 77778890300fdcfa85745bd5e2fb4115e5f9fc3b8831ca5bc35c4df96aa5e4a3 Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 04:46:34.123886 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 04:46:34.164289 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-78d45886d-tlzfl"] Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 04:46:34.200543 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 04:46:34.444368 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c400462d-db9e-442e-99b7-68cd496770ab","Type":"ContainerStarted","Data":"c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336"} Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 04:46:34.444968 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c400462d-db9e-442e-99b7-68cd496770ab" containerName="glance-log" containerID="cri-o://737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599" gracePeriod=30 Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 04:46:34.445576 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c400462d-db9e-442e-99b7-68cd496770ab" containerName="glance-httpd" containerID="cri-o://c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336" gracePeriod=30 Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 04:46:34.458638 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b","Type":"ContainerStarted","Data":"ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba"} Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 04:46:34.458774 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" containerName="glance-log" containerID="cri-o://f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1" gracePeriod=30 Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 
04:46:34.458820 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" containerName="glance-httpd" containerID="cri-o://ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba" gracePeriod=30 Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 04:46:34.472992 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.472966343 podStartE2EDuration="5.472966343s" podCreationTimestamp="2025-10-11 04:46:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:46:34.466630915 +0000 UTC m=+3089.802920601" watchObservedRunningTime="2025-10-11 04:46:34.472966343 +0000 UTC m=+3089.809256029" Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 04:46:34.482261 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7b5998ffb-nn2rn" event={"ID":"a3fe5ce5-561f-4775-8024-6c4896079b30","Type":"ContainerStarted","Data":"77778890300fdcfa85745bd5e2fb4115e5f9fc3b8831ca5bc35c4df96aa5e4a3"} Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 04:46:34.486774 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78d45886d-tlzfl" event={"ID":"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7","Type":"ContainerStarted","Data":"800e9474268685164775ff59281f7153612fd0e947506cd883a82d994b3ae84b"} Oct 11 04:46:34 crc kubenswrapper[4798]: I1011 04:46:34.508662 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.508629661 podStartE2EDuration="5.508629661s" podCreationTimestamp="2025-10-11 04:46:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:46:34.499606279 +0000 UTC m=+3089.835895975" watchObservedRunningTime="2025-10-11 04:46:34.508629661 +0000 UTC m=+3089.844919347" Oct 11 04:46:34 crc kubenswrapper[4798]: E1011 04:46:34.710353 4798 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc400462d_db9e_442e_99b7_68cd496770ab.slice/crio-conmon-c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc446a2ed_b8a3_4043_b8f6_c709f4e97c2b.slice/crio-conmon-f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1.scope\": RecentStats: unable to find data in memory cache]" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.264866 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.309077 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rt4mt\" (UniqueName: \"kubernetes.io/projected/c400462d-db9e-442e-99b7-68cd496770ab-kube-api-access-rt4mt\") pod \"c400462d-db9e-442e-99b7-68cd496770ab\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.309542 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-config-data\") pod \"c400462d-db9e-442e-99b7-68cd496770ab\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.309673 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-combined-ca-bundle\") pod \"c400462d-db9e-442e-99b7-68cd496770ab\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.309702 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c400462d-db9e-442e-99b7-68cd496770ab-logs\") pod \"c400462d-db9e-442e-99b7-68cd496770ab\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.309800 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-scripts\") pod \"c400462d-db9e-442e-99b7-68cd496770ab\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.309947 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"c400462d-db9e-442e-99b7-68cd496770ab\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.310014 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c400462d-db9e-442e-99b7-68cd496770ab-httpd-run\") pod \"c400462d-db9e-442e-99b7-68cd496770ab\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.310125 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c400462d-db9e-442e-99b7-68cd496770ab-ceph\") pod \"c400462d-db9e-442e-99b7-68cd496770ab\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.310153 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-internal-tls-certs\") pod \"c400462d-db9e-442e-99b7-68cd496770ab\" (UID: \"c400462d-db9e-442e-99b7-68cd496770ab\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.313766 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c400462d-db9e-442e-99b7-68cd496770ab-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c400462d-db9e-442e-99b7-68cd496770ab" (UID: "c400462d-db9e-442e-99b7-68cd496770ab"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.316120 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c400462d-db9e-442e-99b7-68cd496770ab-logs" (OuterVolumeSpecName: "logs") pod "c400462d-db9e-442e-99b7-68cd496770ab" (UID: "c400462d-db9e-442e-99b7-68cd496770ab"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.319591 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "c400462d-db9e-442e-99b7-68cd496770ab" (UID: "c400462d-db9e-442e-99b7-68cd496770ab"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.332365 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c400462d-db9e-442e-99b7-68cd496770ab-kube-api-access-rt4mt" (OuterVolumeSpecName: "kube-api-access-rt4mt") pod "c400462d-db9e-442e-99b7-68cd496770ab" (UID: "c400462d-db9e-442e-99b7-68cd496770ab"). InnerVolumeSpecName "kube-api-access-rt4mt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.352100 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-scripts" (OuterVolumeSpecName: "scripts") pod "c400462d-db9e-442e-99b7-68cd496770ab" (UID: "c400462d-db9e-442e-99b7-68cd496770ab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.352299 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c400462d-db9e-442e-99b7-68cd496770ab-ceph" (OuterVolumeSpecName: "ceph") pod "c400462d-db9e-442e-99b7-68cd496770ab" (UID: "c400462d-db9e-442e-99b7-68cd496770ab"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.381576 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c400462d-db9e-442e-99b7-68cd496770ab" (UID: "c400462d-db9e-442e-99b7-68cd496770ab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.393759 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.412844 4798 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.412887 4798 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c400462d-db9e-442e-99b7-68cd496770ab-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.412898 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c400462d-db9e-442e-99b7-68cd496770ab-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.412930 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rt4mt\" (UniqueName: \"kubernetes.io/projected/c400462d-db9e-442e-99b7-68cd496770ab-kube-api-access-rt4mt\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.412941 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.412952 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c400462d-db9e-442e-99b7-68cd496770ab-logs\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.412959 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.416212 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c400462d-db9e-442e-99b7-68cd496770ab" (UID: "c400462d-db9e-442e-99b7-68cd496770ab"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.461516 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-config-data" (OuterVolumeSpecName: "config-data") pod "c400462d-db9e-442e-99b7-68cd496770ab" (UID: "c400462d-db9e-442e-99b7-68cd496770ab"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.477178 4798 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.514910 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz2v8\" (UniqueName: \"kubernetes.io/projected/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-kube-api-access-lz2v8\") pod \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.515018 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-public-tls-certs\") pod \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.516563 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-logs\") pod \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.516601 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-config-data\") pod \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.516653 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.517727 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-combined-ca-bundle\") pod \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.517761 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-scripts\") pod \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.517794 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-httpd-run\") pod \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.517832 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-ceph\") pod \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\" (UID: \"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b\") " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.518426 4798 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.518439 4798 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.518450 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c400462d-db9e-442e-99b7-68cd496770ab-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.519069 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-logs" (OuterVolumeSpecName: "logs") pod "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" (UID: "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.522289 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" (UID: "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.523673 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-ceph" (OuterVolumeSpecName: "ceph") pod "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" (UID: "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.527649 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" (UID: "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b"). InnerVolumeSpecName "local-storage10-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.531010 4798 generic.go:334] "Generic (PLEG): container finished" podID="c400462d-db9e-442e-99b7-68cd496770ab" containerID="c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336" exitCode=0 Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.531059 4798 generic.go:334] "Generic (PLEG): container finished" podID="c400462d-db9e-442e-99b7-68cd496770ab" containerID="737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599" exitCode=143 Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.531116 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c400462d-db9e-442e-99b7-68cd496770ab","Type":"ContainerDied","Data":"c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336"} Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.531156 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c400462d-db9e-442e-99b7-68cd496770ab","Type":"ContainerDied","Data":"737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599"} Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.531168 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c400462d-db9e-442e-99b7-68cd496770ab","Type":"ContainerDied","Data":"d4b8614fc753155afe7dabac7635ca519878f12c9db42f4c94fc423a70444a7d"} Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.531186 4798 scope.go:117] "RemoveContainer" containerID="c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.531354 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.536151 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-kube-api-access-lz2v8" (OuterVolumeSpecName: "kube-api-access-lz2v8") pod "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" (UID: "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b"). InnerVolumeSpecName "kube-api-access-lz2v8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.540434 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-scripts" (OuterVolumeSpecName: "scripts") pod "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" (UID: "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.573045 4798 generic.go:334] "Generic (PLEG): container finished" podID="c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" containerID="ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba" exitCode=0 Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.573099 4798 generic.go:334] "Generic (PLEG): container finished" podID="c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" containerID="f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1" exitCode=143 Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.573131 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b","Type":"ContainerDied","Data":"ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba"} Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.573172 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b","Type":"ContainerDied","Data":"f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1"} Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.573191 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c446a2ed-b8a3-4043-b8f6-c709f4e97c2b","Type":"ContainerDied","Data":"1d8d7de75f906222dad66bef75514c2a7d25cac72bd2bb145874782b853ad36f"} Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.573285 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.629555 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.631993 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.632021 4798 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.632034 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-ceph\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.632043 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz2v8\" (UniqueName: \"kubernetes.io/projected/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-kube-api-access-lz2v8\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.632054 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-logs\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.632086 4798 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.643086 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" (UID: "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.663053 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.663649 4798 scope.go:117] "RemoveContainer" containerID="737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.682234 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 04:46:35 crc kubenswrapper[4798]: E1011 04:46:35.683696 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" containerName="glance-httpd" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.683749 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" containerName="glance-httpd" Oct 11 04:46:35 crc kubenswrapper[4798]: E1011 04:46:35.683763 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" containerName="glance-log" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.683771 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" containerName="glance-log" Oct 11 04:46:35 crc kubenswrapper[4798]: E1011 04:46:35.683788 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b663620b-a5c4-40ff-a611-749eecb67c0a" containerName="mariadb-database-create" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.683798 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="b663620b-a5c4-40ff-a611-749eecb67c0a" containerName="mariadb-database-create" Oct 11 04:46:35 crc kubenswrapper[4798]: E1011 04:46:35.683842 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c400462d-db9e-442e-99b7-68cd496770ab" containerName="glance-log" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.683852 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c400462d-db9e-442e-99b7-68cd496770ab" containerName="glance-log" Oct 11 04:46:35 crc kubenswrapper[4798]: E1011 04:46:35.683869 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c400462d-db9e-442e-99b7-68cd496770ab" containerName="glance-httpd" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.683875 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="c400462d-db9e-442e-99b7-68cd496770ab" containerName="glance-httpd" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.684246 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="b663620b-a5c4-40ff-a611-749eecb67c0a" containerName="mariadb-database-create" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.684276 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="c400462d-db9e-442e-99b7-68cd496770ab" containerName="glance-log" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.684324 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" containerName="glance-log" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.684340 4798 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="c400462d-db9e-442e-99b7-68cd496770ab" containerName="glance-httpd" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.684360 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" containerName="glance-httpd" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.686693 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.694032 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.695547 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.698614 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-config-data" (OuterVolumeSpecName: "config-data") pod "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" (UID: "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.715170 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.721299 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" (UID: "c446a2ed-b8a3-4043-b8f6-c709f4e97c2b"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.731751 4798 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.734871 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-logs\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.735007 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.735141 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.735217 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.735307 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.735641 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.735855 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-ceph\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.735891 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.735944 4798 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ln78b\" (UniqueName: \"kubernetes.io/projected/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-kube-api-access-ln78b\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.735851 4798 scope.go:117] "RemoveContainer" containerID="c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.736142 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.736159 4798 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.736321 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.736341 4798 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:35 crc kubenswrapper[4798]: E1011 04:46:35.741655 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336\": container with ID starting with c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336 not found: ID does not exist" containerID="c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.741714 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336"} err="failed to get container status \"c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336\": rpc error: code = NotFound desc = could not find container \"c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336\": container with ID starting with c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336 not found: ID does not exist" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.741746 4798 scope.go:117] "RemoveContainer" containerID="737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599" Oct 11 04:46:35 crc kubenswrapper[4798]: E1011 04:46:35.749612 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599\": container with ID starting with 737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599 not found: ID does not exist" containerID="737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.749673 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599"} err="failed to get container status 
\"737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599\": rpc error: code = NotFound desc = could not find container \"737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599\": container with ID starting with 737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599 not found: ID does not exist" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.749710 4798 scope.go:117] "RemoveContainer" containerID="c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.755980 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336"} err="failed to get container status \"c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336\": rpc error: code = NotFound desc = could not find container \"c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336\": container with ID starting with c33d750742bdeb88b9be310509909e0b32fdc85ff1120188005133fb36d6f336 not found: ID does not exist" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.756054 4798 scope.go:117] "RemoveContainer" containerID="737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.759465 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599"} err="failed to get container status \"737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599\": rpc error: code = NotFound desc = could not find container \"737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599\": container with ID starting with 737c94d5a1539d719e51b8c3e8cb9366ab6fe60144a044f37f2b297e1b73c599 not found: ID does not exist" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.759496 4798 scope.go:117] "RemoveContainer" containerID="ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.838666 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.839392 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-ceph\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.839793 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.839822 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ln78b\" (UniqueName: \"kubernetes.io/projected/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-kube-api-access-ln78b\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " 
pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.839874 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-logs\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.839897 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.839957 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.839978 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.839999 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.842620 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-logs\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.843257 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.844447 4798 scope.go:117] "RemoveContainer" containerID="f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.846122 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.846299 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: 
\"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.849817 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.851375 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.851774 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-ceph\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.861729 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.887301 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ln78b\" (UniqueName: \"kubernetes.io/projected/53c9a7f3-ce03-406a-8c8a-56f59838bf6b-kube-api-access-ln78b\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.934484 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"53c9a7f3-ce03-406a-8c8a-56f59838bf6b\") " pod="openstack/glance-default-internal-api-0" Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.952204 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 04:46:35 crc kubenswrapper[4798]: I1011 04:46:35.994382 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.001186 4798 scope.go:117] "RemoveContainer" containerID="ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba" Oct 11 04:46:36 crc kubenswrapper[4798]: E1011 04:46:36.002030 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba\": container with ID starting with ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba not found: ID does not exist" containerID="ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.002096 4798 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba"} err="failed to get container status \"ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba\": rpc error: code = NotFound desc = could not find container \"ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba\": container with ID starting with ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba not found: ID does not exist" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.002130 4798 scope.go:117] "RemoveContainer" containerID="f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1" Oct 11 04:46:36 crc kubenswrapper[4798]: E1011 04:46:36.004700 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1\": container with ID starting with f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1 not found: ID does not exist" containerID="f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.004724 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1"} err="failed to get container status \"f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1\": rpc error: code = NotFound desc = could not find container \"f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1\": container with ID starting with f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1 not found: ID does not exist" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.004739 4798 scope.go:117] "RemoveContainer" containerID="ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.008572 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba"} err="failed to get container status \"ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba\": rpc error: code = NotFound desc = could not find container \"ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba\": container with ID starting with ade5fafde9840ea79603055ecee8441fa8c5e21de53b024df37375149ab025ba not found: ID does not exist" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.008631 4798 scope.go:117] "RemoveContainer" containerID="f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.009913 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1"} err="failed to get container status \"f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1\": rpc error: code = NotFound desc = could not find container \"f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1\": container with ID starting with f1591be6a9c3885f41b5bade02c180259a748baf9488fb914eea389e853365b1 not found: ID does not exist" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.028149 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.047998 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.050637 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.052870 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.053051 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.073609 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.184322 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-logs\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.184831 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.184877 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-scripts\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.185050 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h96sd\" (UniqueName: \"kubernetes.io/projected/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-kube-api-access-h96sd\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.185125 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.185611 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.185698 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/projected/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-ceph\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.185789 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-config-data\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.185844 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.292043 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h96sd\" (UniqueName: \"kubernetes.io/projected/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-kube-api-access-h96sd\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.292136 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.292188 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.292250 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-ceph\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.292284 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-config-data\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.292330 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.292361 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-logs\") pod 
\"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.292376 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.292395 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-scripts\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.292911 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.295163 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-logs\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.295778 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.309732 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.318776 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h96sd\" (UniqueName: \"kubernetes.io/projected/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-kube-api-access-h96sd\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.321899 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.324924 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-ceph\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " 
pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.345189 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-config-data\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.347163 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4207ecba-8fbe-4317-9292-8b7fd0d4df8c-scripts\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.350193 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"4207ecba-8fbe-4317-9292-8b7fd0d4df8c\") " pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.426495 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:46:36 crc kubenswrapper[4798]: E1011 04:46:36.426905 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.507097 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 04:46:36 crc kubenswrapper[4798]: I1011 04:46:36.894631 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 04:46:37 crc kubenswrapper[4798]: I1011 04:46:37.402351 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 04:46:37 crc kubenswrapper[4798]: I1011 04:46:37.491639 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c400462d-db9e-442e-99b7-68cd496770ab" path="/var/lib/kubelet/pods/c400462d-db9e-442e-99b7-68cd496770ab/volumes" Oct 11 04:46:37 crc kubenswrapper[4798]: I1011 04:46:37.492661 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c446a2ed-b8a3-4043-b8f6-c709f4e97c2b" path="/var/lib/kubelet/pods/c446a2ed-b8a3-4043-b8f6-c709f4e97c2b/volumes" Oct 11 04:46:37 crc kubenswrapper[4798]: I1011 04:46:37.679875 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4207ecba-8fbe-4317-9292-8b7fd0d4df8c","Type":"ContainerStarted","Data":"6cc0e7c1b84cb77734e4f664746da86b0ae869f80557b76123500b9ec9d1a6c4"} Oct 11 04:46:37 crc kubenswrapper[4798]: I1011 04:46:37.705604 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"53c9a7f3-ce03-406a-8c8a-56f59838bf6b","Type":"ContainerStarted","Data":"58a244be34371dfaa81e38db29c293e429406295d9367badc2493612425f9aef"} Oct 11 04:46:38 crc kubenswrapper[4798]: I1011 04:46:38.762501 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4207ecba-8fbe-4317-9292-8b7fd0d4df8c","Type":"ContainerStarted","Data":"e9d5df286d273cd360c4792ce374e84d6650711472e8b314adbbb070caff70af"} Oct 11 04:46:38 crc kubenswrapper[4798]: I1011 04:46:38.766065 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"53c9a7f3-ce03-406a-8c8a-56f59838bf6b","Type":"ContainerStarted","Data":"ede9bf3acef03892136608928028f2f25b5230f2eb92bc0d83a2e59facc8107e"} Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.412496 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0" Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.462374 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.500051 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-dd57-account-create-bdglj"] Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.501574 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-dd57-account-create-bdglj" Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.506366 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.520144 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-dd57-account-create-bdglj"] Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.616049 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6dnw\" (UniqueName: \"kubernetes.io/projected/96e78519-e8bc-4849-894c-781a43a4bbf7-kube-api-access-t6dnw\") pod \"manila-dd57-account-create-bdglj\" (UID: \"96e78519-e8bc-4849-894c-781a43a4bbf7\") " pod="openstack/manila-dd57-account-create-bdglj" Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.719032 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6dnw\" (UniqueName: \"kubernetes.io/projected/96e78519-e8bc-4849-894c-781a43a4bbf7-kube-api-access-t6dnw\") pod \"manila-dd57-account-create-bdglj\" (UID: \"96e78519-e8bc-4849-894c-781a43a4bbf7\") " pod="openstack/manila-dd57-account-create-bdglj" Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.749222 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6dnw\" (UniqueName: \"kubernetes.io/projected/96e78519-e8bc-4849-894c-781a43a4bbf7-kube-api-access-t6dnw\") pod \"manila-dd57-account-create-bdglj\" (UID: \"96e78519-e8bc-4849-894c-781a43a4bbf7\") " pod="openstack/manila-dd57-account-create-bdglj" Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.785264 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4207ecba-8fbe-4317-9292-8b7fd0d4df8c","Type":"ContainerStarted","Data":"480d4ac13280950eeafc1c8e9f046d991795a51d0e47cfd9344749ec73bc5af6"} Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.794307 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"53c9a7f3-ce03-406a-8c8a-56f59838bf6b","Type":"ContainerStarted","Data":"63ebf7399e299e87d78ab337ef4061b567ed3a858d69f8076840fb56500dbcf0"} Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.821108 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.821077184 podStartE2EDuration="4.821077184s" podCreationTimestamp="2025-10-11 04:46:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:46:39.814262184 +0000 UTC m=+3095.150551870" watchObservedRunningTime="2025-10-11 04:46:39.821077184 +0000 UTC m=+3095.157366870" Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.842336 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-dd57-account-create-bdglj" Oct 11 04:46:39 crc kubenswrapper[4798]: I1011 04:46:39.858060 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.858032182 podStartE2EDuration="4.858032182s" podCreationTimestamp="2025-10-11 04:46:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:46:39.850609827 +0000 UTC m=+3095.186899513" watchObservedRunningTime="2025-10-11 04:46:39.858032182 +0000 UTC m=+3095.194321868" Oct 11 04:46:45 crc kubenswrapper[4798]: I1011 04:46:45.713243 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-dd57-account-create-bdglj"] Oct 11 04:46:45 crc kubenswrapper[4798]: W1011 04:46:45.729724 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod96e78519_e8bc_4849_894c_781a43a4bbf7.slice/crio-efb5cd318bf689f1fa9ced668dcec7c38ad7de0d728ccb2c88f1f486681e92ff WatchSource:0}: Error finding container efb5cd318bf689f1fa9ced668dcec7c38ad7de0d728ccb2c88f1f486681e92ff: Status 404 returned error can't find the container with id efb5cd318bf689f1fa9ced668dcec7c38ad7de0d728ccb2c88f1f486681e92ff Oct 11 04:46:45 crc kubenswrapper[4798]: I1011 04:46:45.885281 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-dd57-account-create-bdglj" event={"ID":"96e78519-e8bc-4849-894c-781a43a4bbf7","Type":"ContainerStarted","Data":"efb5cd318bf689f1fa9ced668dcec7c38ad7de0d728ccb2c88f1f486681e92ff"} Oct 11 04:46:45 crc kubenswrapper[4798]: I1011 04:46:45.900794 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bf5b8bcbc-wjw4p" event={"ID":"778270e6-b796-4692-b341-fb2091a1576e","Type":"ContainerStarted","Data":"5a73af5c3e15c8f56d503cd76525c30ceb207108da1c29c1fe8d8aafd38e337b"} Oct 11 04:46:45 crc kubenswrapper[4798]: I1011 04:46:45.900854 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bf5b8bcbc-wjw4p" event={"ID":"778270e6-b796-4692-b341-fb2091a1576e","Type":"ContainerStarted","Data":"d998f330eaf25e58e7d95131ec3108efec09637f8f8ae87de229125965027f69"} Oct 11 04:46:45 crc kubenswrapper[4798]: I1011 04:46:45.900895 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-bf5b8bcbc-wjw4p" podUID="778270e6-b796-4692-b341-fb2091a1576e" containerName="horizon-log" containerID="cri-o://d998f330eaf25e58e7d95131ec3108efec09637f8f8ae87de229125965027f69" gracePeriod=30 Oct 11 04:46:45 crc kubenswrapper[4798]: I1011 04:46:45.900898 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-bf5b8bcbc-wjw4p" podUID="778270e6-b796-4692-b341-fb2091a1576e" containerName="horizon" containerID="cri-o://5a73af5c3e15c8f56d503cd76525c30ceb207108da1c29c1fe8d8aafd38e337b" gracePeriod=30 Oct 11 04:46:45 crc kubenswrapper[4798]: I1011 04:46:45.909045 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77dc4c7c69-h9tvx" event={"ID":"de7b40fd-d975-4605-88eb-fe5e00086535","Type":"ContainerStarted","Data":"64bf0f6849af5de6416556618b579376ee499f79de5896582cca83d7a060a6cb"} Oct 11 04:46:45 crc kubenswrapper[4798]: I1011 04:46:45.919784 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7b5998ffb-nn2rn" 
event={"ID":"a3fe5ce5-561f-4775-8024-6c4896079b30","Type":"ContainerStarted","Data":"34082f50cd3745d8c0989107755349acddca35a51872183f002438ebda0348ed"} Oct 11 04:46:45 crc kubenswrapper[4798]: I1011 04:46:45.942704 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-bf5b8bcbc-wjw4p" podStartSLOduration=2.823098947 podStartE2EDuration="16.942674268s" podCreationTimestamp="2025-10-11 04:46:29 +0000 UTC" firstStartedPulling="2025-10-11 04:46:31.149210732 +0000 UTC m=+3086.485500418" lastFinishedPulling="2025-10-11 04:46:45.268786053 +0000 UTC m=+3100.605075739" observedRunningTime="2025-10-11 04:46:45.935941629 +0000 UTC m=+3101.272231315" watchObservedRunningTime="2025-10-11 04:46:45.942674268 +0000 UTC m=+3101.278963954" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.029133 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.029211 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.099379 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.108759 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.508947 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.509020 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.556862 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.574654 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.937351 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77dc4c7c69-h9tvx" event={"ID":"de7b40fd-d975-4605-88eb-fe5e00086535","Type":"ContainerStarted","Data":"9c63436abf654e2f49bdff0f01507463c397dc69a0159011b30d7e6837ab9c2a"} Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.937866 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-77dc4c7c69-h9tvx" podUID="de7b40fd-d975-4605-88eb-fe5e00086535" containerName="horizon-log" containerID="cri-o://64bf0f6849af5de6416556618b579376ee499f79de5896582cca83d7a060a6cb" gracePeriod=30 Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.938028 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-77dc4c7c69-h9tvx" podUID="de7b40fd-d975-4605-88eb-fe5e00086535" containerName="horizon" containerID="cri-o://9c63436abf654e2f49bdff0f01507463c397dc69a0159011b30d7e6837ab9c2a" gracePeriod=30 Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.945772 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7b5998ffb-nn2rn" 
event={"ID":"a3fe5ce5-561f-4775-8024-6c4896079b30","Type":"ContainerStarted","Data":"cda3fc20206d3cd61f40202a3c7c2496ceb4632361d05d52ff537b62442309cd"} Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.951210 4798 generic.go:334] "Generic (PLEG): container finished" podID="96e78519-e8bc-4849-894c-781a43a4bbf7" containerID="d4f65dac1a1f9ebcda3786e32adbed8ba4fd6ee01b2ca4d9648ed054fbab9517" exitCode=0 Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.951305 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-dd57-account-create-bdglj" event={"ID":"96e78519-e8bc-4849-894c-781a43a4bbf7","Type":"ContainerDied","Data":"d4f65dac1a1f9ebcda3786e32adbed8ba4fd6ee01b2ca4d9648ed054fbab9517"} Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.955129 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78d45886d-tlzfl" event={"ID":"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7","Type":"ContainerStarted","Data":"e5a0bf3052f9a7a689c723780253fef10a5e615d7ab1c128a44088070fa7c599"} Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.955185 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78d45886d-tlzfl" event={"ID":"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7","Type":"ContainerStarted","Data":"6e79ada2ce3c5b65f6f244fb7a98a83d07af92873ba6a0cf203af681fb65f44d"} Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.956215 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.956293 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.956313 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.956336 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.972718 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-77dc4c7c69-h9tvx" podStartSLOduration=3.395273043 podStartE2EDuration="17.972693396s" podCreationTimestamp="2025-10-11 04:46:29 +0000 UTC" firstStartedPulling="2025-10-11 04:46:30.792856243 +0000 UTC m=+3086.129145929" lastFinishedPulling="2025-10-11 04:46:45.370276596 +0000 UTC m=+3100.706566282" observedRunningTime="2025-10-11 04:46:46.964666437 +0000 UTC m=+3102.300956133" watchObservedRunningTime="2025-10-11 04:46:46.972693396 +0000 UTC m=+3102.308983102" Oct 11 04:46:46 crc kubenswrapper[4798]: I1011 04:46:46.991915 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7b5998ffb-nn2rn" podStartSLOduration=3.6353752310000003 podStartE2EDuration="14.991895087s" podCreationTimestamp="2025-10-11 04:46:32 +0000 UTC" firstStartedPulling="2025-10-11 04:46:34.075297745 +0000 UTC m=+3089.411587431" lastFinishedPulling="2025-10-11 04:46:45.431817601 +0000 UTC m=+3100.768107287" observedRunningTime="2025-10-11 04:46:46.990433813 +0000 UTC m=+3102.326723499" watchObservedRunningTime="2025-10-11 04:46:46.991895087 +0000 UTC m=+3102.328184773" Oct 11 04:46:47 crc kubenswrapper[4798]: I1011 04:46:47.046849 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-78d45886d-tlzfl" podStartSLOduration=3.832561032 podStartE2EDuration="15.046822257s" 
podCreationTimestamp="2025-10-11 04:46:32 +0000 UTC" firstStartedPulling="2025-10-11 04:46:34.214655588 +0000 UTC m=+3089.550945274" lastFinishedPulling="2025-10-11 04:46:45.428916813 +0000 UTC m=+3100.765206499" observedRunningTime="2025-10-11 04:46:47.037362784 +0000 UTC m=+3102.373652460" watchObservedRunningTime="2025-10-11 04:46:47.046822257 +0000 UTC m=+3102.383111943" Oct 11 04:46:48 crc kubenswrapper[4798]: I1011 04:46:48.380281 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-dd57-account-create-bdglj" Oct 11 04:46:48 crc kubenswrapper[4798]: I1011 04:46:48.563142 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t6dnw\" (UniqueName: \"kubernetes.io/projected/96e78519-e8bc-4849-894c-781a43a4bbf7-kube-api-access-t6dnw\") pod \"96e78519-e8bc-4849-894c-781a43a4bbf7\" (UID: \"96e78519-e8bc-4849-894c-781a43a4bbf7\") " Oct 11 04:46:48 crc kubenswrapper[4798]: I1011 04:46:48.577610 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96e78519-e8bc-4849-894c-781a43a4bbf7-kube-api-access-t6dnw" (OuterVolumeSpecName: "kube-api-access-t6dnw") pod "96e78519-e8bc-4849-894c-781a43a4bbf7" (UID: "96e78519-e8bc-4849-894c-781a43a4bbf7"). InnerVolumeSpecName "kube-api-access-t6dnw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:46:48 crc kubenswrapper[4798]: I1011 04:46:48.666131 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t6dnw\" (UniqueName: \"kubernetes.io/projected/96e78519-e8bc-4849-894c-781a43a4bbf7-kube-api-access-t6dnw\") on node \"crc\" DevicePath \"\"" Oct 11 04:46:48 crc kubenswrapper[4798]: I1011 04:46:48.978091 4798 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 11 04:46:48 crc kubenswrapper[4798]: I1011 04:46:48.978123 4798 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 11 04:46:48 crc kubenswrapper[4798]: I1011 04:46:48.979773 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-dd57-account-create-bdglj" Oct 11 04:46:48 crc kubenswrapper[4798]: I1011 04:46:48.993908 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-dd57-account-create-bdglj" event={"ID":"96e78519-e8bc-4849-894c-781a43a4bbf7","Type":"ContainerDied","Data":"efb5cd318bf689f1fa9ced668dcec7c38ad7de0d728ccb2c88f1f486681e92ff"} Oct 11 04:46:48 crc kubenswrapper[4798]: I1011 04:46:48.994429 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="efb5cd318bf689f1fa9ced668dcec7c38ad7de0d728ccb2c88f1f486681e92ff" Oct 11 04:46:49 crc kubenswrapper[4798]: I1011 04:46:49.358459 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 11 04:46:49 crc kubenswrapper[4798]: I1011 04:46:49.734674 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:49 crc kubenswrapper[4798]: I1011 04:46:49.734822 4798 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 11 04:46:49 crc kubenswrapper[4798]: I1011 04:46:49.856791 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 11 04:46:49 crc kubenswrapper[4798]: I1011 04:46:49.867719 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:46:49 crc kubenswrapper[4798]: I1011 04:46:49.965914 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-2qjln"] Oct 11 04:46:49 crc kubenswrapper[4798]: E1011 04:46:49.974544 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96e78519-e8bc-4849-894c-781a43a4bbf7" containerName="mariadb-account-create" Oct 11 04:46:49 crc kubenswrapper[4798]: I1011 04:46:49.974581 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="96e78519-e8bc-4849-894c-781a43a4bbf7" containerName="mariadb-account-create" Oct 11 04:46:49 crc kubenswrapper[4798]: I1011 04:46:49.974870 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="96e78519-e8bc-4849-894c-781a43a4bbf7" containerName="mariadb-account-create" Oct 11 04:46:49 crc kubenswrapper[4798]: I1011 04:46:49.975663 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:49 crc kubenswrapper[4798]: I1011 04:46:49.980732 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-r6gpr" Oct 11 04:46:49 crc kubenswrapper[4798]: I1011 04:46:49.980845 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.000175 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-2qjln"] Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.105273 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-combined-ca-bundle\") pod \"manila-db-sync-2qjln\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.105406 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fclhq\" (UniqueName: \"kubernetes.io/projected/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-kube-api-access-fclhq\") pod \"manila-db-sync-2qjln\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.105496 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-job-config-data\") pod \"manila-db-sync-2qjln\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.105526 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-config-data\") pod \"manila-db-sync-2qjln\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.207203 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fclhq\" (UniqueName: \"kubernetes.io/projected/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-kube-api-access-fclhq\") pod \"manila-db-sync-2qjln\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.207347 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-job-config-data\") pod \"manila-db-sync-2qjln\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.207382 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-config-data\") pod \"manila-db-sync-2qjln\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.208324 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-combined-ca-bundle\") pod \"manila-db-sync-2qjln\" (UID: 
\"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.214753 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-config-data\") pod \"manila-db-sync-2qjln\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.217370 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-job-config-data\") pod \"manila-db-sync-2qjln\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.236890 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-combined-ca-bundle\") pod \"manila-db-sync-2qjln\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.238015 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fclhq\" (UniqueName: \"kubernetes.io/projected/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-kube-api-access-fclhq\") pod \"manila-db-sync-2qjln\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.270341 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.320003 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-2qjln" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.430198 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:46:50 crc kubenswrapper[4798]: E1011 04:46:50.430774 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:46:50 crc kubenswrapper[4798]: I1011 04:46:50.486083 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:46:51 crc kubenswrapper[4798]: I1011 04:46:51.828147 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-2qjln"] Oct 11 04:46:52 crc kubenswrapper[4798]: I1011 04:46:52.005458 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-2qjln" event={"ID":"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2","Type":"ContainerStarted","Data":"7eeff0e5c2c3e7c80c04aa8f9f2a4c5c2c14636bf609646d40f30e5e6f8896c2"} Oct 11 04:46:53 crc kubenswrapper[4798]: I1011 04:46:53.136325 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:53 crc kubenswrapper[4798]: I1011 04:46:53.136404 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:46:53 crc kubenswrapper[4798]: I1011 04:46:53.300002 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:53 crc kubenswrapper[4798]: I1011 04:46:53.300054 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:46:58 crc kubenswrapper[4798]: I1011 04:46:58.096951 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-2qjln" event={"ID":"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2","Type":"ContainerStarted","Data":"36de3b204f3c58d661bd6f6b68e8eaccd0c1efeecddab25a6706476386e43002"} Oct 11 04:46:58 crc kubenswrapper[4798]: I1011 04:46:58.128563 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-2qjln" podStartSLOduration=3.658347723 podStartE2EDuration="9.128514349s" podCreationTimestamp="2025-10-11 04:46:49 +0000 UTC" firstStartedPulling="2025-10-11 04:46:51.830248536 +0000 UTC m=+3107.166538222" lastFinishedPulling="2025-10-11 04:46:57.300415162 +0000 UTC m=+3112.636704848" observedRunningTime="2025-10-11 04:46:58.126346848 +0000 UTC m=+3113.462636554" watchObservedRunningTime="2025-10-11 04:46:58.128514349 +0000 UTC m=+3113.464804045" Oct 11 04:47:01 crc kubenswrapper[4798]: I1011 04:47:01.426654 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:47:01 crc kubenswrapper[4798]: E1011 04:47:01.427571 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:47:03 crc kubenswrapper[4798]: I1011 04:47:03.137619 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-78d45886d-tlzfl" podUID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.242:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.242:8443: connect: connection refused" Oct 11 04:47:03 crc kubenswrapper[4798]: I1011 04:47:03.302105 4798 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7b5998ffb-nn2rn" podUID="a3fe5ce5-561f-4775-8024-6c4896079b30" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.243:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.243:8443: connect: connection refused" Oct 11 04:47:09 crc kubenswrapper[4798]: I1011 04:47:09.236443 4798 generic.go:334] "Generic (PLEG): container finished" podID="24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2" containerID="36de3b204f3c58d661bd6f6b68e8eaccd0c1efeecddab25a6706476386e43002" exitCode=0 Oct 11 04:47:09 crc kubenswrapper[4798]: I1011 04:47:09.237646 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-2qjln" event={"ID":"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2","Type":"ContainerDied","Data":"36de3b204f3c58d661bd6f6b68e8eaccd0c1efeecddab25a6706476386e43002"} Oct 11 04:47:10 crc kubenswrapper[4798]: I1011 04:47:10.739705 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-2qjln" Oct 11 04:47:10 crc kubenswrapper[4798]: I1011 04:47:10.805602 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-combined-ca-bundle\") pod \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " Oct 11 04:47:10 crc kubenswrapper[4798]: I1011 04:47:10.805912 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-config-data\") pod \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " Oct 11 04:47:10 crc kubenswrapper[4798]: I1011 04:47:10.805955 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-job-config-data\") pod \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " Oct 11 04:47:10 crc kubenswrapper[4798]: I1011 04:47:10.806081 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fclhq\" (UniqueName: \"kubernetes.io/projected/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-kube-api-access-fclhq\") pod \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\" (UID: \"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2\") " Oct 11 04:47:10 crc kubenswrapper[4798]: I1011 04:47:10.815366 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2" (UID: "24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2"). InnerVolumeSpecName "job-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:10 crc kubenswrapper[4798]: I1011 04:47:10.815765 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-kube-api-access-fclhq" (OuterVolumeSpecName: "kube-api-access-fclhq") pod "24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2" (UID: "24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2"). InnerVolumeSpecName "kube-api-access-fclhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:47:10 crc kubenswrapper[4798]: I1011 04:47:10.827635 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-config-data" (OuterVolumeSpecName: "config-data") pod "24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2" (UID: "24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:10 crc kubenswrapper[4798]: I1011 04:47:10.853841 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2" (UID: "24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:10 crc kubenswrapper[4798]: I1011 04:47:10.908864 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:10 crc kubenswrapper[4798]: I1011 04:47:10.908905 4798 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-job-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:10 crc kubenswrapper[4798]: I1011 04:47:10.908916 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fclhq\" (UniqueName: \"kubernetes.io/projected/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-kube-api-access-fclhq\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:10 crc kubenswrapper[4798]: I1011 04:47:10.908930 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.265871 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-2qjln" event={"ID":"24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2","Type":"ContainerDied","Data":"7eeff0e5c2c3e7c80c04aa8f9f2a4c5c2c14636bf609646d40f30e5e6f8896c2"} Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.265925 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7eeff0e5c2c3e7c80c04aa8f9f2a4c5c2c14636bf609646d40f30e5e6f8896c2" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.266006 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-2qjln" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.691625 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Oct 11 04:47:11 crc kubenswrapper[4798]: E1011 04:47:11.697098 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2" containerName="manila-db-sync" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.697122 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2" containerName="manila-db-sync" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.697336 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2" containerName="manila-db-sync" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.698515 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.701676 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.702073 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.702309 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.714686 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-r6gpr" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.724714 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.728909 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rx5r5\" (UniqueName: \"kubernetes.io/projected/3aa8fa74-0fd3-468d-935b-a819f2383dfb-kube-api-access-rx5r5\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.729000 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3aa8fa74-0fd3-468d-935b-a819f2383dfb-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.729027 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-scripts\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.729070 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3aa8fa74-0fd3-468d-935b-a819f2383dfb-ceph\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.729112 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" 
(UniqueName: \"kubernetes.io/host-path/3aa8fa74-0fd3-468d-935b-a819f2383dfb-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.729133 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.729160 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.729191 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-config-data\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.786084 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-69655fd4bf-bvxl4"] Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.788535 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.814927 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69655fd4bf-bvxl4"] Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.854278 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.854933 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-ovsdbserver-sb\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.855072 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59n49\" (UniqueName: \"kubernetes.io/projected/d5af4cb1-380b-4a81-87ff-b1eb52952273-kube-api-access-59n49\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.855361 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rx5r5\" (UniqueName: \"kubernetes.io/projected/3aa8fa74-0fd3-468d-935b-a819f2383dfb-kube-api-access-rx5r5\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.855703 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-config\") pod 
\"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.855754 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3aa8fa74-0fd3-468d-935b-a819f2383dfb-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.855797 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-scripts\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.855897 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3aa8fa74-0fd3-468d-935b-a819f2383dfb-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.855917 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-ovsdbserver-nb\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.856058 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3aa8fa74-0fd3-468d-935b-a819f2383dfb-ceph\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.856128 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-dns-svc\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.856247 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/3aa8fa74-0fd3-468d-935b-a819f2383dfb-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.856271 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.856329 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc 
kubenswrapper[4798]: I1011 04:47:11.856361 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-openstack-edpm-ipam\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.856453 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-config-data\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.858034 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/3aa8fa74-0fd3-468d-935b-a819f2383dfb-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.860510 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.865314 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.866441 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3aa8fa74-0fd3-468d-935b-a819f2383dfb-ceph\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.869673 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-config-data\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.872176 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.872687 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-scripts\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.877181 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.915938 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rx5r5\" (UniqueName: \"kubernetes.io/projected/3aa8fa74-0fd3-468d-935b-a819f2383dfb-kube-api-access-rx5r5\") pod 
\"manila-share-share1-0\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") " pod="openstack/manila-share-share1-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.916547 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.970969 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-scripts\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.971094 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/52f182f2-1e00-4c5d-9101-dff47f7c7b85-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.971179 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-config\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.971246 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xj85d\" (UniqueName: \"kubernetes.io/projected/52f182f2-1e00-4c5d-9101-dff47f7c7b85-kube-api-access-xj85d\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.971304 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-ovsdbserver-nb\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.971532 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-dns-svc\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.972021 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-config-data\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.972059 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-openstack-edpm-ipam\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.972301 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-ovsdbserver-sb\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.972472 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59n49\" (UniqueName: \"kubernetes.io/projected/d5af4cb1-380b-4a81-87ff-b1eb52952273-kube-api-access-59n49\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.972551 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.972660 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.974176 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-ovsdbserver-nb\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.974174 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-config\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.974305 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-dns-svc\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.975131 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-ovsdbserver-sb\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.975418 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/d5af4cb1-380b-4a81-87ff-b1eb52952273-openstack-edpm-ipam\") pod \"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:11 crc kubenswrapper[4798]: I1011 04:47:11.997012 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59n49\" (UniqueName: \"kubernetes.io/projected/d5af4cb1-380b-4a81-87ff-b1eb52952273-kube-api-access-59n49\") pod 
\"dnsmasq-dns-69655fd4bf-bvxl4\" (UID: \"d5af4cb1-380b-4a81-87ff-b1eb52952273\") " pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.042760 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.075567 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-config-data\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.076579 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.076634 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.076699 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-scripts\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.076771 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/52f182f2-1e00-4c5d-9101-dff47f7c7b85-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.076875 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xj85d\" (UniqueName: \"kubernetes.io/projected/52f182f2-1e00-4c5d-9101-dff47f7c7b85-kube-api-access-xj85d\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.077978 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/52f182f2-1e00-4c5d-9101-dff47f7c7b85-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.078979 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.080346 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-config-data\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.080993 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.082800 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.088113 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.088430 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-scripts\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.088953 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.105372 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.108480 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xj85d\" (UniqueName: \"kubernetes.io/projected/52f182f2-1e00-4c5d-9101-dff47f7c7b85-kube-api-access-xj85d\") pod \"manila-scheduler-0\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " pod="openstack/manila-scheduler-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.119096 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.184998 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9d17a20d-cad0-45bc-86dd-91025c00cfb4-etc-machine-id\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.185057 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlggx\" (UniqueName: \"kubernetes.io/projected/9d17a20d-cad0-45bc-86dd-91025c00cfb4-kube-api-access-hlggx\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.185161 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.185715 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-config-data\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.185765 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d17a20d-cad0-45bc-86dd-91025c00cfb4-logs\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.185811 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-scripts\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.186426 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-config-data-custom\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.288798 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.289507 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9d17a20d-cad0-45bc-86dd-91025c00cfb4-etc-machine-id\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.289557 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlggx\" (UniqueName: \"kubernetes.io/projected/9d17a20d-cad0-45bc-86dd-91025c00cfb4-kube-api-access-hlggx\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.289672 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9d17a20d-cad0-45bc-86dd-91025c00cfb4-etc-machine-id\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.289707 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.289776 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-config-data\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.290058 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d17a20d-cad0-45bc-86dd-91025c00cfb4-logs\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.290126 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-scripts\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.290354 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-config-data-custom\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.292084 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d17a20d-cad0-45bc-86dd-91025c00cfb4-logs\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.296573 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-scripts\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 
04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.296772 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-config-data\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.297034 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-config-data-custom\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.297148 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.308809 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlggx\" (UniqueName: \"kubernetes.io/projected/9d17a20d-cad0-45bc-86dd-91025c00cfb4-kube-api-access-hlggx\") pod \"manila-api-0\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.501249 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.625184 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69655fd4bf-bvxl4"] Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.850307 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Oct 11 04:47:12 crc kubenswrapper[4798]: I1011 04:47:12.903366 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Oct 11 04:47:13 crc kubenswrapper[4798]: W1011 04:47:13.094478 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d17a20d_cad0_45bc_86dd_91025c00cfb4.slice/crio-bf8fd7f3a2d08871cdca255becc221475f29a18c23d9568cf78005fbba536744 WatchSource:0}: Error finding container bf8fd7f3a2d08871cdca255becc221475f29a18c23d9568cf78005fbba536744: Status 404 returned error can't find the container with id bf8fd7f3a2d08871cdca255becc221475f29a18c23d9568cf78005fbba536744 Oct 11 04:47:13 crc kubenswrapper[4798]: I1011 04:47:13.099086 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Oct 11 04:47:13 crc kubenswrapper[4798]: I1011 04:47:13.299027 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"52f182f2-1e00-4c5d-9101-dff47f7c7b85","Type":"ContainerStarted","Data":"d5797e077cca131422c6cd601cb05f8aa054eaff2740607d3bb7024f640405cf"} Oct 11 04:47:13 crc kubenswrapper[4798]: I1011 04:47:13.313111 4798 generic.go:334] "Generic (PLEG): container finished" podID="d5af4cb1-380b-4a81-87ff-b1eb52952273" containerID="57824a2af6a0ac973b45e5efea3ff7d5d8f01f15ca65fe15b0954e9c84157e18" exitCode=0 Oct 11 04:47:13 crc kubenswrapper[4798]: I1011 04:47:13.313481 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" 
event={"ID":"d5af4cb1-380b-4a81-87ff-b1eb52952273","Type":"ContainerDied","Data":"57824a2af6a0ac973b45e5efea3ff7d5d8f01f15ca65fe15b0954e9c84157e18"} Oct 11 04:47:13 crc kubenswrapper[4798]: I1011 04:47:13.313586 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" event={"ID":"d5af4cb1-380b-4a81-87ff-b1eb52952273","Type":"ContainerStarted","Data":"f6a9def963b4f74630b595e35a976d8bb7688d00eb5d812c390b078b29897703"} Oct 11 04:47:13 crc kubenswrapper[4798]: I1011 04:47:13.316291 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"9d17a20d-cad0-45bc-86dd-91025c00cfb4","Type":"ContainerStarted","Data":"bf8fd7f3a2d08871cdca255becc221475f29a18c23d9568cf78005fbba536744"} Oct 11 04:47:13 crc kubenswrapper[4798]: I1011 04:47:13.318436 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"3aa8fa74-0fd3-468d-935b-a819f2383dfb","Type":"ContainerStarted","Data":"600b943243dd68eeb305b6de0179681720e9e7b44ebc8407391aff530fa41e2c"} Oct 11 04:47:13 crc kubenswrapper[4798]: I1011 04:47:13.430846 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:47:13 crc kubenswrapper[4798]: E1011 04:47:13.431690 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:47:14 crc kubenswrapper[4798]: I1011 04:47:14.331849 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" event={"ID":"d5af4cb1-380b-4a81-87ff-b1eb52952273","Type":"ContainerStarted","Data":"9790bf4f3053541ca2dca5705bfb806a665d1e024fd3548ebff3408069e08f25"} Oct 11 04:47:14 crc kubenswrapper[4798]: I1011 04:47:14.332523 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:14 crc kubenswrapper[4798]: I1011 04:47:14.334604 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"9d17a20d-cad0-45bc-86dd-91025c00cfb4","Type":"ContainerStarted","Data":"c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d"} Oct 11 04:47:14 crc kubenswrapper[4798]: I1011 04:47:14.364834 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" podStartSLOduration=3.364814897 podStartE2EDuration="3.364814897s" podCreationTimestamp="2025-10-11 04:47:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:47:14.355547609 +0000 UTC m=+3129.691837295" watchObservedRunningTime="2025-10-11 04:47:14.364814897 +0000 UTC m=+3129.701104583" Oct 11 04:47:15 crc kubenswrapper[4798]: I1011 04:47:15.376255 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"52f182f2-1e00-4c5d-9101-dff47f7c7b85","Type":"ContainerStarted","Data":"965d3bde34ca8a3bf5bac23e703476a3357bdd8e6b3204be80e54f0960c02648"} Oct 11 04:47:15 crc kubenswrapper[4798]: I1011 04:47:15.376771 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/manila-scheduler-0" event={"ID":"52f182f2-1e00-4c5d-9101-dff47f7c7b85","Type":"ContainerStarted","Data":"d9589fd1679892ecd011355a006f1e4c35bfe2c0a069c79bf8402ccdc209fb7b"} Oct 11 04:47:15 crc kubenswrapper[4798]: I1011 04:47:15.378630 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"9d17a20d-cad0-45bc-86dd-91025c00cfb4","Type":"ContainerStarted","Data":"a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52"} Oct 11 04:47:15 crc kubenswrapper[4798]: I1011 04:47:15.429480 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.713857233 podStartE2EDuration="4.429432217s" podCreationTimestamp="2025-10-11 04:47:11 +0000 UTC" firstStartedPulling="2025-10-11 04:47:12.91454246 +0000 UTC m=+3128.250832136" lastFinishedPulling="2025-10-11 04:47:13.630117434 +0000 UTC m=+3128.966407120" observedRunningTime="2025-10-11 04:47:15.402922975 +0000 UTC m=+3130.739212671" watchObservedRunningTime="2025-10-11 04:47:15.429432217 +0000 UTC m=+3130.765721913" Oct 11 04:47:15 crc kubenswrapper[4798]: I1011 04:47:15.443813 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=3.443780084 podStartE2EDuration="3.443780084s" podCreationTimestamp="2025-10-11 04:47:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:47:15.435798957 +0000 UTC m=+3130.772088643" watchObservedRunningTime="2025-10-11 04:47:15.443780084 +0000 UTC m=+3130.780069770" Oct 11 04:47:15 crc kubenswrapper[4798]: I1011 04:47:15.580991 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Oct 11 04:47:15 crc kubenswrapper[4798]: I1011 04:47:15.658775 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:47:15 crc kubenswrapper[4798]: I1011 04:47:15.770802 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.414798 4798 generic.go:334] "Generic (PLEG): container finished" podID="778270e6-b796-4692-b341-fb2091a1576e" containerID="5a73af5c3e15c8f56d503cd76525c30ceb207108da1c29c1fe8d8aafd38e337b" exitCode=137 Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.415312 4798 generic.go:334] "Generic (PLEG): container finished" podID="778270e6-b796-4692-b341-fb2091a1576e" containerID="d998f330eaf25e58e7d95131ec3108efec09637f8f8ae87de229125965027f69" exitCode=137 Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.416529 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bf5b8bcbc-wjw4p" event={"ID":"778270e6-b796-4692-b341-fb2091a1576e","Type":"ContainerDied","Data":"5a73af5c3e15c8f56d503cd76525c30ceb207108da1c29c1fe8d8aafd38e337b"} Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.416568 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bf5b8bcbc-wjw4p" event={"ID":"778270e6-b796-4692-b341-fb2091a1576e","Type":"ContainerDied","Data":"d998f330eaf25e58e7d95131ec3108efec09637f8f8ae87de229125965027f69"} Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.416604 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.581587 4798 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.636750 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/778270e6-b796-4692-b341-fb2091a1576e-config-data\") pod \"778270e6-b796-4692-b341-fb2091a1576e\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.636852 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/778270e6-b796-4692-b341-fb2091a1576e-scripts\") pod \"778270e6-b796-4692-b341-fb2091a1576e\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.636944 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/778270e6-b796-4692-b341-fb2091a1576e-horizon-secret-key\") pod \"778270e6-b796-4692-b341-fb2091a1576e\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.637025 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9r8cg\" (UniqueName: \"kubernetes.io/projected/778270e6-b796-4692-b341-fb2091a1576e-kube-api-access-9r8cg\") pod \"778270e6-b796-4692-b341-fb2091a1576e\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.637338 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/778270e6-b796-4692-b341-fb2091a1576e-logs\") pod \"778270e6-b796-4692-b341-fb2091a1576e\" (UID: \"778270e6-b796-4692-b341-fb2091a1576e\") " Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.638420 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/778270e6-b796-4692-b341-fb2091a1576e-logs" (OuterVolumeSpecName: "logs") pod "778270e6-b796-4692-b341-fb2091a1576e" (UID: "778270e6-b796-4692-b341-fb2091a1576e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.648584 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/778270e6-b796-4692-b341-fb2091a1576e-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "778270e6-b796-4692-b341-fb2091a1576e" (UID: "778270e6-b796-4692-b341-fb2091a1576e"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.657823 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/778270e6-b796-4692-b341-fb2091a1576e-kube-api-access-9r8cg" (OuterVolumeSpecName: "kube-api-access-9r8cg") pod "778270e6-b796-4692-b341-fb2091a1576e" (UID: "778270e6-b796-4692-b341-fb2091a1576e"). InnerVolumeSpecName "kube-api-access-9r8cg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.679853 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/778270e6-b796-4692-b341-fb2091a1576e-scripts" (OuterVolumeSpecName: "scripts") pod "778270e6-b796-4692-b341-fb2091a1576e" (UID: "778270e6-b796-4692-b341-fb2091a1576e"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.681995 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/778270e6-b796-4692-b341-fb2091a1576e-config-data" (OuterVolumeSpecName: "config-data") pod "778270e6-b796-4692-b341-fb2091a1576e" (UID: "778270e6-b796-4692-b341-fb2091a1576e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.746957 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/778270e6-b796-4692-b341-fb2091a1576e-logs\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.746995 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/778270e6-b796-4692-b341-fb2091a1576e-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.747004 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/778270e6-b796-4692-b341-fb2091a1576e-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.747016 4798 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/778270e6-b796-4692-b341-fb2091a1576e-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:16 crc kubenswrapper[4798]: I1011 04:47:16.747024 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9r8cg\" (UniqueName: \"kubernetes.io/projected/778270e6-b796-4692-b341-fb2091a1576e-kube-api-access-9r8cg\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.447556 4798 generic.go:334] "Generic (PLEG): container finished" podID="de7b40fd-d975-4605-88eb-fe5e00086535" containerID="9c63436abf654e2f49bdff0f01507463c397dc69a0159011b30d7e6837ab9c2a" exitCode=137 Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.447965 4798 generic.go:334] "Generic (PLEG): container finished" podID="de7b40fd-d975-4605-88eb-fe5e00086535" containerID="64bf0f6849af5de6416556618b579376ee499f79de5896582cca83d7a060a6cb" exitCode=137 Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.448017 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77dc4c7c69-h9tvx" event={"ID":"de7b40fd-d975-4605-88eb-fe5e00086535","Type":"ContainerDied","Data":"9c63436abf654e2f49bdff0f01507463c397dc69a0159011b30d7e6837ab9c2a"} Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.448050 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77dc4c7c69-h9tvx" event={"ID":"de7b40fd-d975-4605-88eb-fe5e00086535","Type":"ContainerDied","Data":"64bf0f6849af5de6416556618b579376ee499f79de5896582cca83d7a060a6cb"} Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.448060 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77dc4c7c69-h9tvx" event={"ID":"de7b40fd-d975-4605-88eb-fe5e00086535","Type":"ContainerDied","Data":"3859a952aa5d31349105180abab7a173acedfebbe88f572b87c8534abff79241"} Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.448071 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3859a952aa5d31349105180abab7a173acedfebbe88f572b87c8534abff79241" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.450147 4798 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="9d17a20d-cad0-45bc-86dd-91025c00cfb4" containerName="manila-api-log" containerID="cri-o://c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d" gracePeriod=30 Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.450597 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-bf5b8bcbc-wjw4p" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.451918 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bf5b8bcbc-wjw4p" event={"ID":"778270e6-b796-4692-b341-fb2091a1576e","Type":"ContainerDied","Data":"70e5667beecebae9f37f265e408448c7cb9c1f0239dfd162f3b76dd881634243"} Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.451998 4798 scope.go:117] "RemoveContainer" containerID="5a73af5c3e15c8f56d503cd76525c30ceb207108da1c29c1fe8d8aafd38e337b" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.452135 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="9d17a20d-cad0-45bc-86dd-91025c00cfb4" containerName="manila-api" containerID="cri-o://a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52" gracePeriod=30 Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.517614 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.569241 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sx7r4\" (UniqueName: \"kubernetes.io/projected/de7b40fd-d975-4605-88eb-fe5e00086535-kube-api-access-sx7r4\") pod \"de7b40fd-d975-4605-88eb-fe5e00086535\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.569310 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/de7b40fd-d975-4605-88eb-fe5e00086535-config-data\") pod \"de7b40fd-d975-4605-88eb-fe5e00086535\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.569386 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de7b40fd-d975-4605-88eb-fe5e00086535-logs\") pod \"de7b40fd-d975-4605-88eb-fe5e00086535\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.569521 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/de7b40fd-d975-4605-88eb-fe5e00086535-horizon-secret-key\") pod \"de7b40fd-d975-4605-88eb-fe5e00086535\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.569623 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de7b40fd-d975-4605-88eb-fe5e00086535-scripts\") pod \"de7b40fd-d975-4605-88eb-fe5e00086535\" (UID: \"de7b40fd-d975-4605-88eb-fe5e00086535\") " Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.573613 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de7b40fd-d975-4605-88eb-fe5e00086535-logs" (OuterVolumeSpecName: "logs") pod "de7b40fd-d975-4605-88eb-fe5e00086535" (UID: "de7b40fd-d975-4605-88eb-fe5e00086535"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.578513 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de7b40fd-d975-4605-88eb-fe5e00086535-kube-api-access-sx7r4" (OuterVolumeSpecName: "kube-api-access-sx7r4") pod "de7b40fd-d975-4605-88eb-fe5e00086535" (UID: "de7b40fd-d975-4605-88eb-fe5e00086535"). InnerVolumeSpecName "kube-api-access-sx7r4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.583875 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de7b40fd-d975-4605-88eb-fe5e00086535-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "de7b40fd-d975-4605-88eb-fe5e00086535" (UID: "de7b40fd-d975-4605-88eb-fe5e00086535"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.609115 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de7b40fd-d975-4605-88eb-fe5e00086535-config-data" (OuterVolumeSpecName: "config-data") pod "de7b40fd-d975-4605-88eb-fe5e00086535" (UID: "de7b40fd-d975-4605-88eb-fe5e00086535"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.634279 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de7b40fd-d975-4605-88eb-fe5e00086535-scripts" (OuterVolumeSpecName: "scripts") pod "de7b40fd-d975-4605-88eb-fe5e00086535" (UID: "de7b40fd-d975-4605-88eb-fe5e00086535"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.672449 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sx7r4\" (UniqueName: \"kubernetes.io/projected/de7b40fd-d975-4605-88eb-fe5e00086535-kube-api-access-sx7r4\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.672491 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/de7b40fd-d975-4605-88eb-fe5e00086535-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.672504 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de7b40fd-d975-4605-88eb-fe5e00086535-logs\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.672514 4798 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/de7b40fd-d975-4605-88eb-fe5e00086535-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.672523 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de7b40fd-d975-4605-88eb-fe5e00086535-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.719404 4798 scope.go:117] "RemoveContainer" containerID="d998f330eaf25e58e7d95131ec3108efec09637f8f8ae87de229125965027f69" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.808260 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-78d45886d-tlzfl" Oct 11 04:47:17 crc kubenswrapper[4798]: I1011 04:47:17.919024 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7b5998ffb-nn2rn" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.002466 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-78d45886d-tlzfl"] Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.314369 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.398194 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9d17a20d-cad0-45bc-86dd-91025c00cfb4-etc-machine-id\") pod \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.398359 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-config-data\") pod \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.398372 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9d17a20d-cad0-45bc-86dd-91025c00cfb4-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "9d17a20d-cad0-45bc-86dd-91025c00cfb4" (UID: "9d17a20d-cad0-45bc-86dd-91025c00cfb4"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.398478 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-combined-ca-bundle\") pod \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.398567 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-config-data-custom\") pod \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.398693 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-scripts\") pod \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.398806 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d17a20d-cad0-45bc-86dd-91025c00cfb4-logs\") pod \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.398879 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlggx\" (UniqueName: \"kubernetes.io/projected/9d17a20d-cad0-45bc-86dd-91025c00cfb4-kube-api-access-hlggx\") pod \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\" (UID: \"9d17a20d-cad0-45bc-86dd-91025c00cfb4\") " Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.399353 4798 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9d17a20d-cad0-45bc-86dd-91025c00cfb4-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.401954 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d17a20d-cad0-45bc-86dd-91025c00cfb4-logs" (OuterVolumeSpecName: "logs") pod "9d17a20d-cad0-45bc-86dd-91025c00cfb4" (UID: "9d17a20d-cad0-45bc-86dd-91025c00cfb4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.406153 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d17a20d-cad0-45bc-86dd-91025c00cfb4-kube-api-access-hlggx" (OuterVolumeSpecName: "kube-api-access-hlggx") pod "9d17a20d-cad0-45bc-86dd-91025c00cfb4" (UID: "9d17a20d-cad0-45bc-86dd-91025c00cfb4"). InnerVolumeSpecName "kube-api-access-hlggx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.407503 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9d17a20d-cad0-45bc-86dd-91025c00cfb4" (UID: "9d17a20d-cad0-45bc-86dd-91025c00cfb4"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.409504 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-scripts" (OuterVolumeSpecName: "scripts") pod "9d17a20d-cad0-45bc-86dd-91025c00cfb4" (UID: "9d17a20d-cad0-45bc-86dd-91025c00cfb4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.434566 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d17a20d-cad0-45bc-86dd-91025c00cfb4" (UID: "9d17a20d-cad0-45bc-86dd-91025c00cfb4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.468522 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-config-data" (OuterVolumeSpecName: "config-data") pod "9d17a20d-cad0-45bc-86dd-91025c00cfb4" (UID: "9d17a20d-cad0-45bc-86dd-91025c00cfb4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.472801 4798 generic.go:334] "Generic (PLEG): container finished" podID="9d17a20d-cad0-45bc-86dd-91025c00cfb4" containerID="a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52" exitCode=0 Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.472841 4798 generic.go:334] "Generic (PLEG): container finished" podID="9d17a20d-cad0-45bc-86dd-91025c00cfb4" containerID="c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d" exitCode=143 Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.473059 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-78d45886d-tlzfl" podUID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerName="horizon-log" containerID="cri-o://e5a0bf3052f9a7a689c723780253fef10a5e615d7ab1c128a44088070fa7c599" gracePeriod=30 Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.473457 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.475510 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"9d17a20d-cad0-45bc-86dd-91025c00cfb4","Type":"ContainerDied","Data":"a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52"} Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.475575 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"9d17a20d-cad0-45bc-86dd-91025c00cfb4","Type":"ContainerDied","Data":"c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d"} Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.475589 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"9d17a20d-cad0-45bc-86dd-91025c00cfb4","Type":"ContainerDied","Data":"bf8fd7f3a2d08871cdca255becc221475f29a18c23d9568cf78005fbba536744"} Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.475610 4798 scope.go:117] "RemoveContainer" containerID="a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.475840 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-77dc4c7c69-h9tvx" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.479503 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-78d45886d-tlzfl" podUID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerName="horizon" containerID="cri-o://6e79ada2ce3c5b65f6f244fb7a98a83d07af92873ba6a0cf203af681fb65f44d" gracePeriod=30 Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.501797 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.501835 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d17a20d-cad0-45bc-86dd-91025c00cfb4-logs\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.501849 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlggx\" (UniqueName: \"kubernetes.io/projected/9d17a20d-cad0-45bc-86dd-91025c00cfb4-kube-api-access-hlggx\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.501862 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.501872 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.501882 4798 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9d17a20d-cad0-45bc-86dd-91025c00cfb4-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.544748 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.558690 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-api-0"] Oct 11 04:47:18 
crc kubenswrapper[4798]: I1011 04:47:18.560306 4798 scope.go:117] "RemoveContainer" containerID="c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.576986 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-77dc4c7c69-h9tvx"] Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.586897 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-77dc4c7c69-h9tvx"] Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.592589 4798 scope.go:117] "RemoveContainer" containerID="a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52" Oct 11 04:47:18 crc kubenswrapper[4798]: E1011 04:47:18.596952 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52\": container with ID starting with a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52 not found: ID does not exist" containerID="a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.596994 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52"} err="failed to get container status \"a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52\": rpc error: code = NotFound desc = could not find container \"a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52\": container with ID starting with a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52 not found: ID does not exist" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.597021 4798 scope.go:117] "RemoveContainer" containerID="c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.597101 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Oct 11 04:47:18 crc kubenswrapper[4798]: E1011 04:47:18.597647 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d17a20d-cad0-45bc-86dd-91025c00cfb4" containerName="manila-api" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.597666 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d17a20d-cad0-45bc-86dd-91025c00cfb4" containerName="manila-api" Oct 11 04:47:18 crc kubenswrapper[4798]: E1011 04:47:18.597732 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="778270e6-b796-4692-b341-fb2091a1576e" containerName="horizon-log" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.597740 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="778270e6-b796-4692-b341-fb2091a1576e" containerName="horizon-log" Oct 11 04:47:18 crc kubenswrapper[4798]: E1011 04:47:18.597759 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d17a20d-cad0-45bc-86dd-91025c00cfb4" containerName="manila-api-log" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.597766 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d17a20d-cad0-45bc-86dd-91025c00cfb4" containerName="manila-api-log" Oct 11 04:47:18 crc kubenswrapper[4798]: E1011 04:47:18.597776 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de7b40fd-d975-4605-88eb-fe5e00086535" containerName="horizon" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.597782 4798 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="de7b40fd-d975-4605-88eb-fe5e00086535" containerName="horizon" Oct 11 04:47:18 crc kubenswrapper[4798]: E1011 04:47:18.597796 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de7b40fd-d975-4605-88eb-fe5e00086535" containerName="horizon-log" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.597802 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="de7b40fd-d975-4605-88eb-fe5e00086535" containerName="horizon-log" Oct 11 04:47:18 crc kubenswrapper[4798]: E1011 04:47:18.597815 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="778270e6-b796-4692-b341-fb2091a1576e" containerName="horizon" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.597821 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="778270e6-b796-4692-b341-fb2091a1576e" containerName="horizon" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.598003 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="778270e6-b796-4692-b341-fb2091a1576e" containerName="horizon-log" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.598021 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d17a20d-cad0-45bc-86dd-91025c00cfb4" containerName="manila-api-log" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.598030 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="de7b40fd-d975-4605-88eb-fe5e00086535" containerName="horizon-log" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.598042 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="778270e6-b796-4692-b341-fb2091a1576e" containerName="horizon" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.598061 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="de7b40fd-d975-4605-88eb-fe5e00086535" containerName="horizon" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.598072 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d17a20d-cad0-45bc-86dd-91025c00cfb4" containerName="manila-api" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.599351 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: E1011 04:47:18.600675 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d\": container with ID starting with c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d not found: ID does not exist" containerID="c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.600728 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d"} err="failed to get container status \"c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d\": rpc error: code = NotFound desc = could not find container \"c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d\": container with ID starting with c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d not found: ID does not exist" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.600762 4798 scope.go:117] "RemoveContainer" containerID="a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.601071 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52"} err="failed to get container status \"a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52\": rpc error: code = NotFound desc = could not find container \"a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52\": container with ID starting with a5afe674af5af75d2ec556bac87009426b17ba12bdbe1274c4ea6def522cdd52 not found: ID does not exist" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.601093 4798 scope.go:117] "RemoveContainer" containerID="c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.601336 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d"} err="failed to get container status \"c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d\": rpc error: code = NotFound desc = could not find container \"c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d\": container with ID starting with c5038922a392effbc91fa310babee1f1026dc8a6269a1d83a02b734eb4dcda7d not found: ID does not exist" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.604524 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-public-svc" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.604741 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.604856 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-internal-svc" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.614283 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.706701 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-scripts\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.707025 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-internal-tls-certs\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.707163 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hhjw\" (UniqueName: \"kubernetes.io/projected/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-kube-api-access-8hhjw\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.707279 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-config-data-custom\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.707416 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-logs\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.707513 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-public-tls-certs\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.707642 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.707750 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-config-data\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.707967 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-etc-machine-id\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.810461 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 
04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.810511 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-config-data\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.810610 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-etc-machine-id\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.810672 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-scripts\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.810688 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-internal-tls-certs\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.810711 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hhjw\" (UniqueName: \"kubernetes.io/projected/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-kube-api-access-8hhjw\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.810738 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-config-data-custom\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.810777 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-logs\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.810792 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-public-tls-certs\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.811369 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-etc-machine-id\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.811864 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-logs\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 
04:47:18.822739 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.822742 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-internal-tls-certs\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.822978 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-config-data\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.823078 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-public-tls-certs\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.823158 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-scripts\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.823997 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-config-data-custom\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.827172 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hhjw\" (UniqueName: \"kubernetes.io/projected/5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01-kube-api-access-8hhjw\") pod \"manila-api-0\" (UID: \"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01\") " pod="openstack/manila-api-0" Oct 11 04:47:18 crc kubenswrapper[4798]: I1011 04:47:18.932330 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Oct 11 04:47:19 crc kubenswrapper[4798]: I1011 04:47:19.339530 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:47:19 crc kubenswrapper[4798]: I1011 04:47:19.339848 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="ceilometer-central-agent" containerID="cri-o://1b10a01ae2b31c73f5fd84fdb2eaf23fc80e7eb2392c49efe6922e51d1f1d332" gracePeriod=30 Oct 11 04:47:19 crc kubenswrapper[4798]: I1011 04:47:19.339988 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="sg-core" containerID="cri-o://6a3e25816582fceeef3240445751d55263c4943af22edc52f7d757cb05fad6bb" gracePeriod=30 Oct 11 04:47:19 crc kubenswrapper[4798]: I1011 04:47:19.339937 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="proxy-httpd" containerID="cri-o://79a4ab8d75501aa2ffa0b9b593d98a34639de7eb61c673ce43a444369f894791" gracePeriod=30 Oct 11 04:47:19 crc kubenswrapper[4798]: I1011 04:47:19.340066 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="ceilometer-notification-agent" containerID="cri-o://d90b580096c09653e41e84e5f964e8af6a915f538f16413ff9076927b16fc167" gracePeriod=30 Oct 11 04:47:19 crc kubenswrapper[4798]: I1011 04:47:19.437572 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d17a20d-cad0-45bc-86dd-91025c00cfb4" path="/var/lib/kubelet/pods/9d17a20d-cad0-45bc-86dd-91025c00cfb4/volumes" Oct 11 04:47:19 crc kubenswrapper[4798]: I1011 04:47:19.438980 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de7b40fd-d975-4605-88eb-fe5e00086535" path="/var/lib/kubelet/pods/de7b40fd-d975-4605-88eb-fe5e00086535/volumes" Oct 11 04:47:19 crc kubenswrapper[4798]: I1011 04:47:19.503000 4798 generic.go:334] "Generic (PLEG): container finished" podID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerID="79a4ab8d75501aa2ffa0b9b593d98a34639de7eb61c673ce43a444369f894791" exitCode=0 Oct 11 04:47:19 crc kubenswrapper[4798]: I1011 04:47:19.503042 4798 generic.go:334] "Generic (PLEG): container finished" podID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerID="6a3e25816582fceeef3240445751d55263c4943af22edc52f7d757cb05fad6bb" exitCode=2 Oct 11 04:47:19 crc kubenswrapper[4798]: I1011 04:47:19.503063 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"679fd723-9ff7-4d15-b769-3d709ed1f9ab","Type":"ContainerDied","Data":"79a4ab8d75501aa2ffa0b9b593d98a34639de7eb61c673ce43a444369f894791"} Oct 11 04:47:19 crc kubenswrapper[4798]: I1011 04:47:19.503094 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"679fd723-9ff7-4d15-b769-3d709ed1f9ab","Type":"ContainerDied","Data":"6a3e25816582fceeef3240445751d55263c4943af22edc52f7d757cb05fad6bb"} Oct 11 04:47:19 crc kubenswrapper[4798]: I1011 04:47:19.533670 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Oct 11 04:47:20 crc kubenswrapper[4798]: I1011 04:47:20.518760 4798 generic.go:334] "Generic (PLEG): container finished" podID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" 
containerID="1b10a01ae2b31c73f5fd84fdb2eaf23fc80e7eb2392c49efe6922e51d1f1d332" exitCode=0 Oct 11 04:47:20 crc kubenswrapper[4798]: I1011 04:47:20.518826 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"679fd723-9ff7-4d15-b769-3d709ed1f9ab","Type":"ContainerDied","Data":"1b10a01ae2b31c73f5fd84fdb2eaf23fc80e7eb2392c49efe6922e51d1f1d332"} Oct 11 04:47:20 crc kubenswrapper[4798]: I1011 04:47:20.522441 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01","Type":"ContainerStarted","Data":"aa7ec0d5eb421654ab0a202d12d6f4e48af1e4dc4d2ff37e46761724b0067625"} Oct 11 04:47:20 crc kubenswrapper[4798]: I1011 04:47:20.522495 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01","Type":"ContainerStarted","Data":"3e08be8060dc8a42187300a0818c8d9036eb3392805a2caf6b6032b0b9128fe7"} Oct 11 04:47:22 crc kubenswrapper[4798]: I1011 04:47:22.121333 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-69655fd4bf-bvxl4" Oct 11 04:47:22 crc kubenswrapper[4798]: I1011 04:47:22.244013 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-l6zlt"] Oct 11 04:47:22 crc kubenswrapper[4798]: I1011 04:47:22.244694 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" podUID="1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" containerName="dnsmasq-dns" containerID="cri-o://5897186685fe77a987b8b5d0a9355871f553c4c9ea21f2cb11d15867e1bae8a2" gracePeriod=10 Oct 11 04:47:22 crc kubenswrapper[4798]: I1011 04:47:22.290594 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Oct 11 04:47:22 crc kubenswrapper[4798]: I1011 04:47:22.547499 4798 generic.go:334] "Generic (PLEG): container finished" podID="1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" containerID="5897186685fe77a987b8b5d0a9355871f553c4c9ea21f2cb11d15867e1bae8a2" exitCode=0 Oct 11 04:47:22 crc kubenswrapper[4798]: I1011 04:47:22.547578 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" event={"ID":"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36","Type":"ContainerDied","Data":"5897186685fe77a987b8b5d0a9355871f553c4c9ea21f2cb11d15867e1bae8a2"} Oct 11 04:47:22 crc kubenswrapper[4798]: I1011 04:47:22.548913 4798 generic.go:334] "Generic (PLEG): container finished" podID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerID="6e79ada2ce3c5b65f6f244fb7a98a83d07af92873ba6a0cf203af681fb65f44d" exitCode=0 Oct 11 04:47:22 crc kubenswrapper[4798]: I1011 04:47:22.548942 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78d45886d-tlzfl" event={"ID":"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7","Type":"ContainerDied","Data":"6e79ada2ce3c5b65f6f244fb7a98a83d07af92873ba6a0cf203af681fb65f44d"} Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.136226 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-78d45886d-tlzfl" podUID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.242:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.242:8443: connect: connection refused" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.558680 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.575777 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" event={"ID":"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36","Type":"ContainerDied","Data":"f45384c58b1f03d8dea6374916101254a0dd55ef5146c561ae7d4e82af2cadfe"} Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.575870 4798 scope.go:117] "RemoveContainer" containerID="5897186685fe77a987b8b5d0a9355871f553c4c9ea21f2cb11d15867e1bae8a2" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.576069 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fbc59fbb7-l6zlt" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.625769 4798 scope.go:117] "RemoveContainer" containerID="a08941f812228abb5cc33df5c8d3ca37b21ed16b84dce06ca863b44ed8b67cab" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.650379 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7v5tv\" (UniqueName: \"kubernetes.io/projected/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-kube-api-access-7v5tv\") pod \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.650927 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-config\") pod \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.650987 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-openstack-edpm-ipam\") pod \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.651089 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-dns-svc\") pod \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.651191 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-ovsdbserver-nb\") pod \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.651319 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-ovsdbserver-sb\") pod \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\" (UID: \"1ecaa1d8-972c-4d57-84ef-9c86b16c3c36\") " Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.663664 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-kube-api-access-7v5tv" (OuterVolumeSpecName: "kube-api-access-7v5tv") pod "1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" (UID: "1ecaa1d8-972c-4d57-84ef-9c86b16c3c36"). InnerVolumeSpecName "kube-api-access-7v5tv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.754281 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7v5tv\" (UniqueName: \"kubernetes.io/projected/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-kube-api-access-7v5tv\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.758285 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-config" (OuterVolumeSpecName: "config") pod "1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" (UID: "1ecaa1d8-972c-4d57-84ef-9c86b16c3c36"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.766152 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" (UID: "1ecaa1d8-972c-4d57-84ef-9c86b16c3c36"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.772028 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" (UID: "1ecaa1d8-972c-4d57-84ef-9c86b16c3c36"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.780018 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" (UID: "1ecaa1d8-972c-4d57-84ef-9c86b16c3c36"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.780133 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" (UID: "1ecaa1d8-972c-4d57-84ef-9c86b16c3c36"). InnerVolumeSpecName "openstack-edpm-ipam". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.858547 4798 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.858672 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.858826 4798 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.858923 4798 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.858988 4798 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.920729 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-l6zlt"] Oct 11 04:47:23 crc kubenswrapper[4798]: I1011 04:47:23.928081 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-l6zlt"] Oct 11 04:47:24 crc kubenswrapper[4798]: I1011 04:47:24.591892 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01","Type":"ContainerStarted","Data":"3a2fb13a1095412fb78a02a1ef0253bfabf117b9eebffa695291d470ae546a53"} Oct 11 04:47:24 crc kubenswrapper[4798]: I1011 04:47:24.592425 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Oct 11 04:47:24 crc kubenswrapper[4798]: I1011 04:47:24.594014 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"3aa8fa74-0fd3-468d-935b-a819f2383dfb","Type":"ContainerStarted","Data":"cddbb04d4f074bea6fc46f11006ddccdc4411586963a24a0a556330a4164b0bc"} Oct 11 04:47:24 crc kubenswrapper[4798]: I1011 04:47:24.594068 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"3aa8fa74-0fd3-468d-935b-a819f2383dfb","Type":"ContainerStarted","Data":"a2bcddff7277f6c827cc95a984acddb98118e5a3debaeb243aa493fc2447e1c8"} Oct 11 04:47:24 crc kubenswrapper[4798]: I1011 04:47:24.619623 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=6.619597681 podStartE2EDuration="6.619597681s" podCreationTimestamp="2025-10-11 04:47:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:47:24.617125823 +0000 UTC m=+3139.953415509" watchObservedRunningTime="2025-10-11 04:47:24.619597681 +0000 UTC m=+3139.955887367" Oct 11 04:47:24 crc kubenswrapper[4798]: I1011 04:47:24.644282 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.241752246 
podStartE2EDuration="13.6442626s" podCreationTimestamp="2025-10-11 04:47:11 +0000 UTC" firstStartedPulling="2025-10-11 04:47:12.867334281 +0000 UTC m=+3128.203623967" lastFinishedPulling="2025-10-11 04:47:23.269844635 +0000 UTC m=+3138.606134321" observedRunningTime="2025-10-11 04:47:24.642244873 +0000 UTC m=+3139.978534579" watchObservedRunningTime="2025-10-11 04:47:24.6442626 +0000 UTC m=+3139.980552286" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.129929 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.207136 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/679fd723-9ff7-4d15-b769-3d709ed1f9ab-log-httpd\") pod \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.207198 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nv8hd\" (UniqueName: \"kubernetes.io/projected/679fd723-9ff7-4d15-b769-3d709ed1f9ab-kube-api-access-nv8hd\") pod \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.207264 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-combined-ca-bundle\") pod \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.207314 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-ceilometer-tls-certs\") pod \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.207364 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/679fd723-9ff7-4d15-b769-3d709ed1f9ab-run-httpd\") pod \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.207409 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-sg-core-conf-yaml\") pod \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.207441 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-config-data\") pod \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.207456 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-scripts\") pod \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\" (UID: \"679fd723-9ff7-4d15-b769-3d709ed1f9ab\") " Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.207645 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/679fd723-9ff7-4d15-b769-3d709ed1f9ab-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "679fd723-9ff7-4d15-b769-3d709ed1f9ab" (UID: "679fd723-9ff7-4d15-b769-3d709ed1f9ab"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.207902 4798 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/679fd723-9ff7-4d15-b769-3d709ed1f9ab-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.208055 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/679fd723-9ff7-4d15-b769-3d709ed1f9ab-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "679fd723-9ff7-4d15-b769-3d709ed1f9ab" (UID: "679fd723-9ff7-4d15-b769-3d709ed1f9ab"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.215845 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/679fd723-9ff7-4d15-b769-3d709ed1f9ab-kube-api-access-nv8hd" (OuterVolumeSpecName: "kube-api-access-nv8hd") pod "679fd723-9ff7-4d15-b769-3d709ed1f9ab" (UID: "679fd723-9ff7-4d15-b769-3d709ed1f9ab"). InnerVolumeSpecName "kube-api-access-nv8hd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.229460 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-scripts" (OuterVolumeSpecName: "scripts") pod "679fd723-9ff7-4d15-b769-3d709ed1f9ab" (UID: "679fd723-9ff7-4d15-b769-3d709ed1f9ab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.304955 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "679fd723-9ff7-4d15-b769-3d709ed1f9ab" (UID: "679fd723-9ff7-4d15-b769-3d709ed1f9ab"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.311985 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nv8hd\" (UniqueName: \"kubernetes.io/projected/679fd723-9ff7-4d15-b769-3d709ed1f9ab-kube-api-access-nv8hd\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.312032 4798 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.312052 4798 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/679fd723-9ff7-4d15-b769-3d709ed1f9ab-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.312070 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.318235 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "679fd723-9ff7-4d15-b769-3d709ed1f9ab" (UID: "679fd723-9ff7-4d15-b769-3d709ed1f9ab"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.345992 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "679fd723-9ff7-4d15-b769-3d709ed1f9ab" (UID: "679fd723-9ff7-4d15-b769-3d709ed1f9ab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.376586 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-config-data" (OuterVolumeSpecName: "config-data") pod "679fd723-9ff7-4d15-b769-3d709ed1f9ab" (UID: "679fd723-9ff7-4d15-b769-3d709ed1f9ab"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.414200 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.414257 4798 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.414274 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/679fd723-9ff7-4d15-b769-3d709ed1f9ab-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.437120 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" path="/var/lib/kubelet/pods/1ecaa1d8-972c-4d57-84ef-9c86b16c3c36/volumes" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.614744 4798 generic.go:334] "Generic (PLEG): container finished" podID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerID="d90b580096c09653e41e84e5f964e8af6a915f538f16413ff9076927b16fc167" exitCode=0 Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.614888 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.614880 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"679fd723-9ff7-4d15-b769-3d709ed1f9ab","Type":"ContainerDied","Data":"d90b580096c09653e41e84e5f964e8af6a915f538f16413ff9076927b16fc167"} Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.614982 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"679fd723-9ff7-4d15-b769-3d709ed1f9ab","Type":"ContainerDied","Data":"5c79018e2ed96f374bf95f105d41860ae991ef1e79919403414d99795370b586"} Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.615024 4798 scope.go:117] "RemoveContainer" containerID="79a4ab8d75501aa2ffa0b9b593d98a34639de7eb61c673ce43a444369f894791" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.645003 4798 scope.go:117] "RemoveContainer" containerID="6a3e25816582fceeef3240445751d55263c4943af22edc52f7d757cb05fad6bb" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.652040 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.668140 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.675620 4798 scope.go:117] "RemoveContainer" containerID="d90b580096c09653e41e84e5f964e8af6a915f538f16413ff9076927b16fc167" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.683528 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:47:25 crc kubenswrapper[4798]: E1011 04:47:25.684039 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" containerName="init" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.684059 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" containerName="init" Oct 11 04:47:25 crc kubenswrapper[4798]: E1011 04:47:25.684075 4798 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="sg-core" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.684082 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="sg-core" Oct 11 04:47:25 crc kubenswrapper[4798]: E1011 04:47:25.684100 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="proxy-httpd" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.684106 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="proxy-httpd" Oct 11 04:47:25 crc kubenswrapper[4798]: E1011 04:47:25.684119 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="ceilometer-central-agent" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.684125 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="ceilometer-central-agent" Oct 11 04:47:25 crc kubenswrapper[4798]: E1011 04:47:25.684139 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="ceilometer-notification-agent" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.684148 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="ceilometer-notification-agent" Oct 11 04:47:25 crc kubenswrapper[4798]: E1011 04:47:25.684192 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" containerName="dnsmasq-dns" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.684200 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" containerName="dnsmasq-dns" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.684406 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="ceilometer-central-agent" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.684418 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="sg-core" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.684432 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="proxy-httpd" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.684447 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" containerName="ceilometer-notification-agent" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.684458 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ecaa1d8-972c-4d57-84ef-9c86b16c3c36" containerName="dnsmasq-dns" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.686546 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.689499 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.690044 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.697327 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.703848 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.738010 4798 scope.go:117] "RemoveContainer" containerID="1b10a01ae2b31c73f5fd84fdb2eaf23fc80e7eb2392c49efe6922e51d1f1d332" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.774531 4798 scope.go:117] "RemoveContainer" containerID="79a4ab8d75501aa2ffa0b9b593d98a34639de7eb61c673ce43a444369f894791" Oct 11 04:47:25 crc kubenswrapper[4798]: E1011 04:47:25.777203 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79a4ab8d75501aa2ffa0b9b593d98a34639de7eb61c673ce43a444369f894791\": container with ID starting with 79a4ab8d75501aa2ffa0b9b593d98a34639de7eb61c673ce43a444369f894791 not found: ID does not exist" containerID="79a4ab8d75501aa2ffa0b9b593d98a34639de7eb61c673ce43a444369f894791" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.777269 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79a4ab8d75501aa2ffa0b9b593d98a34639de7eb61c673ce43a444369f894791"} err="failed to get container status \"79a4ab8d75501aa2ffa0b9b593d98a34639de7eb61c673ce43a444369f894791\": rpc error: code = NotFound desc = could not find container \"79a4ab8d75501aa2ffa0b9b593d98a34639de7eb61c673ce43a444369f894791\": container with ID starting with 79a4ab8d75501aa2ffa0b9b593d98a34639de7eb61c673ce43a444369f894791 not found: ID does not exist" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.777300 4798 scope.go:117] "RemoveContainer" containerID="6a3e25816582fceeef3240445751d55263c4943af22edc52f7d757cb05fad6bb" Oct 11 04:47:25 crc kubenswrapper[4798]: E1011 04:47:25.777939 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a3e25816582fceeef3240445751d55263c4943af22edc52f7d757cb05fad6bb\": container with ID starting with 6a3e25816582fceeef3240445751d55263c4943af22edc52f7d757cb05fad6bb not found: ID does not exist" containerID="6a3e25816582fceeef3240445751d55263c4943af22edc52f7d757cb05fad6bb" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.778017 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a3e25816582fceeef3240445751d55263c4943af22edc52f7d757cb05fad6bb"} err="failed to get container status \"6a3e25816582fceeef3240445751d55263c4943af22edc52f7d757cb05fad6bb\": rpc error: code = NotFound desc = could not find container \"6a3e25816582fceeef3240445751d55263c4943af22edc52f7d757cb05fad6bb\": container with ID starting with 6a3e25816582fceeef3240445751d55263c4943af22edc52f7d757cb05fad6bb not found: ID does not exist" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.778058 4798 scope.go:117] "RemoveContainer" containerID="d90b580096c09653e41e84e5f964e8af6a915f538f16413ff9076927b16fc167" Oct 11 04:47:25 
crc kubenswrapper[4798]: E1011 04:47:25.778721 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d90b580096c09653e41e84e5f964e8af6a915f538f16413ff9076927b16fc167\": container with ID starting with d90b580096c09653e41e84e5f964e8af6a915f538f16413ff9076927b16fc167 not found: ID does not exist" containerID="d90b580096c09653e41e84e5f964e8af6a915f538f16413ff9076927b16fc167" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.778771 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d90b580096c09653e41e84e5f964e8af6a915f538f16413ff9076927b16fc167"} err="failed to get container status \"d90b580096c09653e41e84e5f964e8af6a915f538f16413ff9076927b16fc167\": rpc error: code = NotFound desc = could not find container \"d90b580096c09653e41e84e5f964e8af6a915f538f16413ff9076927b16fc167\": container with ID starting with d90b580096c09653e41e84e5f964e8af6a915f538f16413ff9076927b16fc167 not found: ID does not exist" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.778810 4798 scope.go:117] "RemoveContainer" containerID="1b10a01ae2b31c73f5fd84fdb2eaf23fc80e7eb2392c49efe6922e51d1f1d332" Oct 11 04:47:25 crc kubenswrapper[4798]: E1011 04:47:25.779159 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b10a01ae2b31c73f5fd84fdb2eaf23fc80e7eb2392c49efe6922e51d1f1d332\": container with ID starting with 1b10a01ae2b31c73f5fd84fdb2eaf23fc80e7eb2392c49efe6922e51d1f1d332 not found: ID does not exist" containerID="1b10a01ae2b31c73f5fd84fdb2eaf23fc80e7eb2392c49efe6922e51d1f1d332" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.779201 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b10a01ae2b31c73f5fd84fdb2eaf23fc80e7eb2392c49efe6922e51d1f1d332"} err="failed to get container status \"1b10a01ae2b31c73f5fd84fdb2eaf23fc80e7eb2392c49efe6922e51d1f1d332\": rpc error: code = NotFound desc = could not find container \"1b10a01ae2b31c73f5fd84fdb2eaf23fc80e7eb2392c49efe6922e51d1f1d332\": container with ID starting with 1b10a01ae2b31c73f5fd84fdb2eaf23fc80e7eb2392c49efe6922e51d1f1d332 not found: ID does not exist" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.831980 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.832035 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwgtb\" (UniqueName: \"kubernetes.io/projected/2f089830-3555-4b43-bea2-68577bec1e15-kube-api-access-nwgtb\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.832083 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-scripts\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.832147 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f089830-3555-4b43-bea2-68577bec1e15-run-httpd\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.832881 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.834059 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-config-data\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.834101 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.834174 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f089830-3555-4b43-bea2-68577bec1e15-log-httpd\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.936931 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-config-data\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.936989 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.937054 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f089830-3555-4b43-bea2-68577bec1e15-log-httpd\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.937122 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.937154 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwgtb\" (UniqueName: \"kubernetes.io/projected/2f089830-3555-4b43-bea2-68577bec1e15-kube-api-access-nwgtb\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: 
I1011 04:47:25.937175 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-scripts\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.937199 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f089830-3555-4b43-bea2-68577bec1e15-run-httpd\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.937254 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.937792 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f089830-3555-4b43-bea2-68577bec1e15-log-httpd\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.938244 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f089830-3555-4b43-bea2-68577bec1e15-run-httpd\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.943430 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.943614 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-config-data\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.944154 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.944593 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.946518 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-scripts\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:25 crc kubenswrapper[4798]: I1011 04:47:25.952681 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-nwgtb\" (UniqueName: \"kubernetes.io/projected/2f089830-3555-4b43-bea2-68577bec1e15-kube-api-access-nwgtb\") pod \"ceilometer-0\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " pod="openstack/ceilometer-0" Oct 11 04:47:26 crc kubenswrapper[4798]: I1011 04:47:26.008671 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:47:26 crc kubenswrapper[4798]: I1011 04:47:26.079188 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:47:26 crc kubenswrapper[4798]: I1011 04:47:26.534078 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:47:26 crc kubenswrapper[4798]: W1011 04:47:26.545759 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f089830_3555_4b43_bea2_68577bec1e15.slice/crio-5dfe8e265fa3c32000882abd008260202dca04a4b6acf65c2e20ada89fda104d WatchSource:0}: Error finding container 5dfe8e265fa3c32000882abd008260202dca04a4b6acf65c2e20ada89fda104d: Status 404 returned error can't find the container with id 5dfe8e265fa3c32000882abd008260202dca04a4b6acf65c2e20ada89fda104d Oct 11 04:47:26 crc kubenswrapper[4798]: I1011 04:47:26.630495 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f089830-3555-4b43-bea2-68577bec1e15","Type":"ContainerStarted","Data":"5dfe8e265fa3c32000882abd008260202dca04a4b6acf65c2e20ada89fda104d"} Oct 11 04:47:27 crc kubenswrapper[4798]: I1011 04:47:27.423940 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f" Oct 11 04:47:27 crc kubenswrapper[4798]: I1011 04:47:27.439260 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="679fd723-9ff7-4d15-b769-3d709ed1f9ab" path="/var/lib/kubelet/pods/679fd723-9ff7-4d15-b769-3d709ed1f9ab/volumes" Oct 11 04:47:27 crc kubenswrapper[4798]: I1011 04:47:27.643038 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f089830-3555-4b43-bea2-68577bec1e15","Type":"ContainerStarted","Data":"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583"} Oct 11 04:47:28 crc kubenswrapper[4798]: I1011 04:47:28.656124 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"d5e0dca4104eeca7c9a76b11cb7ae2acd478e963ddf4e0a9647a5803b7ebfc90"} Oct 11 04:47:28 crc kubenswrapper[4798]: I1011 04:47:28.664030 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f089830-3555-4b43-bea2-68577bec1e15","Type":"ContainerStarted","Data":"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2"} Oct 11 04:47:29 crc kubenswrapper[4798]: I1011 04:47:29.711799 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f089830-3555-4b43-bea2-68577bec1e15","Type":"ContainerStarted","Data":"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41"} Oct 11 04:47:32 crc kubenswrapper[4798]: I1011 04:47:32.043266 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Oct 11 04:47:32 crc kubenswrapper[4798]: I1011 04:47:32.756293 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"2f089830-3555-4b43-bea2-68577bec1e15","Type":"ContainerStarted","Data":"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a"} Oct 11 04:47:32 crc kubenswrapper[4798]: I1011 04:47:32.757272 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 11 04:47:32 crc kubenswrapper[4798]: I1011 04:47:32.756526 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="ceilometer-notification-agent" containerID="cri-o://67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2" gracePeriod=30 Oct 11 04:47:32 crc kubenswrapper[4798]: I1011 04:47:32.756474 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="ceilometer-central-agent" containerID="cri-o://a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583" gracePeriod=30 Oct 11 04:47:32 crc kubenswrapper[4798]: I1011 04:47:32.756605 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="proxy-httpd" containerID="cri-o://3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a" gracePeriod=30 Oct 11 04:47:32 crc kubenswrapper[4798]: I1011 04:47:32.756515 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="sg-core" containerID="cri-o://67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41" gracePeriod=30 Oct 11 04:47:32 crc kubenswrapper[4798]: I1011 04:47:32.805230 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.337984157 podStartE2EDuration="7.805201664s" podCreationTimestamp="2025-10-11 04:47:25 +0000 UTC" firstStartedPulling="2025-10-11 04:47:26.549381618 +0000 UTC m=+3141.885671304" lastFinishedPulling="2025-10-11 04:47:32.016599115 +0000 UTC m=+3147.352888811" observedRunningTime="2025-10-11 04:47:32.791996594 +0000 UTC m=+3148.128286280" watchObservedRunningTime="2025-10-11 04:47:32.805201664 +0000 UTC m=+3148.141491350" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.136552 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-78d45886d-tlzfl" podUID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.242:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.242:8443: connect: connection refused" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.704157 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.744871 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f089830-3555-4b43-bea2-68577bec1e15-log-httpd\") pod \"2f089830-3555-4b43-bea2-68577bec1e15\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.744973 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-combined-ca-bundle\") pod \"2f089830-3555-4b43-bea2-68577bec1e15\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.745080 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-config-data\") pod \"2f089830-3555-4b43-bea2-68577bec1e15\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.745219 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-scripts\") pod \"2f089830-3555-4b43-bea2-68577bec1e15\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.745250 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f089830-3555-4b43-bea2-68577bec1e15-run-httpd\") pod \"2f089830-3555-4b43-bea2-68577bec1e15\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.745277 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-sg-core-conf-yaml\") pod \"2f089830-3555-4b43-bea2-68577bec1e15\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.745344 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwgtb\" (UniqueName: \"kubernetes.io/projected/2f089830-3555-4b43-bea2-68577bec1e15-kube-api-access-nwgtb\") pod \"2f089830-3555-4b43-bea2-68577bec1e15\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.746032 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-ceilometer-tls-certs\") pod \"2f089830-3555-4b43-bea2-68577bec1e15\" (UID: \"2f089830-3555-4b43-bea2-68577bec1e15\") " Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.745784 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f089830-3555-4b43-bea2-68577bec1e15-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2f089830-3555-4b43-bea2-68577bec1e15" (UID: "2f089830-3555-4b43-bea2-68577bec1e15"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.746622 4798 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f089830-3555-4b43-bea2-68577bec1e15-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.746788 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f089830-3555-4b43-bea2-68577bec1e15-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2f089830-3555-4b43-bea2-68577bec1e15" (UID: "2f089830-3555-4b43-bea2-68577bec1e15"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.752955 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f089830-3555-4b43-bea2-68577bec1e15-kube-api-access-nwgtb" (OuterVolumeSpecName: "kube-api-access-nwgtb") pod "2f089830-3555-4b43-bea2-68577bec1e15" (UID: "2f089830-3555-4b43-bea2-68577bec1e15"). InnerVolumeSpecName "kube-api-access-nwgtb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.753614 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-scripts" (OuterVolumeSpecName: "scripts") pod "2f089830-3555-4b43-bea2-68577bec1e15" (UID: "2f089830-3555-4b43-bea2-68577bec1e15"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.791159 4798 generic.go:334] "Generic (PLEG): container finished" podID="2f089830-3555-4b43-bea2-68577bec1e15" containerID="3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a" exitCode=0 Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.791199 4798 generic.go:334] "Generic (PLEG): container finished" podID="2f089830-3555-4b43-bea2-68577bec1e15" containerID="67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41" exitCode=2 Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.791207 4798 generic.go:334] "Generic (PLEG): container finished" podID="2f089830-3555-4b43-bea2-68577bec1e15" containerID="67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2" exitCode=0 Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.791216 4798 generic.go:334] "Generic (PLEG): container finished" podID="2f089830-3555-4b43-bea2-68577bec1e15" containerID="a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583" exitCode=0 Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.791240 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f089830-3555-4b43-bea2-68577bec1e15","Type":"ContainerDied","Data":"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a"} Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.791271 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f089830-3555-4b43-bea2-68577bec1e15","Type":"ContainerDied","Data":"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41"} Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.791280 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f089830-3555-4b43-bea2-68577bec1e15","Type":"ContainerDied","Data":"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2"} Oct 11 04:47:33 crc 
kubenswrapper[4798]: I1011 04:47:33.791290 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f089830-3555-4b43-bea2-68577bec1e15","Type":"ContainerDied","Data":"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583"} Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.791298 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f089830-3555-4b43-bea2-68577bec1e15","Type":"ContainerDied","Data":"5dfe8e265fa3c32000882abd008260202dca04a4b6acf65c2e20ada89fda104d"} Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.791316 4798 scope.go:117] "RemoveContainer" containerID="3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.793146 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.795015 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2f089830-3555-4b43-bea2-68577bec1e15" (UID: "2f089830-3555-4b43-bea2-68577bec1e15"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.849463 4798 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f089830-3555-4b43-bea2-68577bec1e15-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.849501 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.849514 4798 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.849527 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwgtb\" (UniqueName: \"kubernetes.io/projected/2f089830-3555-4b43-bea2-68577bec1e15-kube-api-access-nwgtb\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.851027 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2f089830-3555-4b43-bea2-68577bec1e15" (UID: "2f089830-3555-4b43-bea2-68577bec1e15"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.851656 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "2f089830-3555-4b43-bea2-68577bec1e15" (UID: "2f089830-3555-4b43-bea2-68577bec1e15"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.875204 4798 scope.go:117] "RemoveContainer" containerID="67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.901687 4798 scope.go:117] "RemoveContainer" containerID="67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.902381 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-config-data" (OuterVolumeSpecName: "config-data") pod "2f089830-3555-4b43-bea2-68577bec1e15" (UID: "2f089830-3555-4b43-bea2-68577bec1e15"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.929566 4798 scope.go:117] "RemoveContainer" containerID="a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.952216 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.952474 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.952492 4798 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f089830-3555-4b43-bea2-68577bec1e15-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.955841 4798 scope.go:117] "RemoveContainer" containerID="3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a" Oct 11 04:47:33 crc kubenswrapper[4798]: E1011 04:47:33.956573 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a\": container with ID starting with 3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a not found: ID does not exist" containerID="3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.956610 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a"} err="failed to get container status \"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a\": rpc error: code = NotFound desc = could not find container \"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a\": container with ID starting with 3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.956635 4798 scope.go:117] "RemoveContainer" containerID="67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41" Oct 11 04:47:33 crc kubenswrapper[4798]: E1011 04:47:33.957098 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41\": container with ID starting with 
67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41 not found: ID does not exist" containerID="67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.957121 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41"} err="failed to get container status \"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41\": rpc error: code = NotFound desc = could not find container \"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41\": container with ID starting with 67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41 not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.957133 4798 scope.go:117] "RemoveContainer" containerID="67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2" Oct 11 04:47:33 crc kubenswrapper[4798]: E1011 04:47:33.957515 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2\": container with ID starting with 67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2 not found: ID does not exist" containerID="67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.957544 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2"} err="failed to get container status \"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2\": rpc error: code = NotFound desc = could not find container \"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2\": container with ID starting with 67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2 not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.957563 4798 scope.go:117] "RemoveContainer" containerID="a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583" Oct 11 04:47:33 crc kubenswrapper[4798]: E1011 04:47:33.958704 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583\": container with ID starting with a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583 not found: ID does not exist" containerID="a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.958738 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583"} err="failed to get container status \"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583\": rpc error: code = NotFound desc = could not find container \"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583\": container with ID starting with a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583 not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.958760 4798 scope.go:117] "RemoveContainer" containerID="3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.959291 4798 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a"} err="failed to get container status \"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a\": rpc error: code = NotFound desc = could not find container \"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a\": container with ID starting with 3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.959365 4798 scope.go:117] "RemoveContainer" containerID="67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.959928 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.959989 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41"} err="failed to get container status \"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41\": rpc error: code = NotFound desc = could not find container \"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41\": container with ID starting with 67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41 not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.960011 4798 scope.go:117] "RemoveContainer" containerID="67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.960450 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2"} err="failed to get container status \"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2\": rpc error: code = NotFound desc = could not find container \"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2\": container with ID starting with 67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2 not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.960477 4798 scope.go:117] "RemoveContainer" containerID="a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.960732 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583"} err="failed to get container status \"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583\": rpc error: code = NotFound desc = could not find container \"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583\": container with ID starting with a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583 not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.960762 4798 scope.go:117] "RemoveContainer" containerID="3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.962910 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a"} err="failed to get container status \"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a\": rpc error: code = NotFound desc = could not find container 
\"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a\": container with ID starting with 3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.962945 4798 scope.go:117] "RemoveContainer" containerID="67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.963300 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41"} err="failed to get container status \"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41\": rpc error: code = NotFound desc = could not find container \"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41\": container with ID starting with 67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41 not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.963330 4798 scope.go:117] "RemoveContainer" containerID="67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.963557 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2"} err="failed to get container status \"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2\": rpc error: code = NotFound desc = could not find container \"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2\": container with ID starting with 67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2 not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.963618 4798 scope.go:117] "RemoveContainer" containerID="a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.963915 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583"} err="failed to get container status \"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583\": rpc error: code = NotFound desc = could not find container \"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583\": container with ID starting with a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583 not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.963973 4798 scope.go:117] "RemoveContainer" containerID="3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.964510 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a"} err="failed to get container status \"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a\": rpc error: code = NotFound desc = could not find container \"3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a\": container with ID starting with 3f963c7257923ff3adb330ed123fadd6ba0624d6d56587d7ad6e4706c4f0071a not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.964544 4798 scope.go:117] "RemoveContainer" containerID="67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.964848 4798 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41"} err="failed to get container status \"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41\": rpc error: code = NotFound desc = could not find container \"67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41\": container with ID starting with 67d13f0af49ee463f3843cfec5e278782d267e2dff785e42fd5861eb34d2df41 not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.964878 4798 scope.go:117] "RemoveContainer" containerID="67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.965444 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2"} err="failed to get container status \"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2\": rpc error: code = NotFound desc = could not find container \"67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2\": container with ID starting with 67f4411d3252818b638e23b19db74ae206b5880032d39a76c8f82c4c4f8b7bc2 not found: ID does not exist" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.965483 4798 scope.go:117] "RemoveContainer" containerID="a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583" Oct 11 04:47:33 crc kubenswrapper[4798]: I1011 04:47:33.965772 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583"} err="failed to get container status \"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583\": rpc error: code = NotFound desc = could not find container \"a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583\": container with ID starting with a5a8ee4de65401540b4f6ebd0659fa1850ca458f63737633e539cb39e0617583 not found: ID does not exist" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.014866 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.168677 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.182195 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.214007 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:47:34 crc kubenswrapper[4798]: E1011 04:47:34.214645 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="ceilometer-central-agent" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.214667 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="ceilometer-central-agent" Oct 11 04:47:34 crc kubenswrapper[4798]: E1011 04:47:34.214702 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="ceilometer-notification-agent" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.214710 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="ceilometer-notification-agent" Oct 11 04:47:34 crc kubenswrapper[4798]: E1011 
04:47:34.214724 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="sg-core" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.214730 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="sg-core" Oct 11 04:47:34 crc kubenswrapper[4798]: E1011 04:47:34.214752 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="proxy-httpd" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.214758 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="proxy-httpd" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.214973 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="proxy-httpd" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.214988 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="ceilometer-notification-agent" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.215010 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="sg-core" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.215037 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f089830-3555-4b43-bea2-68577bec1e15" containerName="ceilometer-central-agent" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.221499 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.224315 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.225567 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.226000 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.226152 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.264369 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.264780 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-scripts\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.264851 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.264953 4798 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwdh2\" (UniqueName: \"kubernetes.io/projected/f1864cfd-61a1-4cb5-ae10-af539f50abc4-kube-api-access-qwdh2\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.264995 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1864cfd-61a1-4cb5-ae10-af539f50abc4-log-httpd\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.265108 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-config-data\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.265299 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.265673 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1864cfd-61a1-4cb5-ae10-af539f50abc4-run-httpd\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.368666 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.368802 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-scripts\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.368834 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.368869 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwdh2\" (UniqueName: \"kubernetes.io/projected/f1864cfd-61a1-4cb5-ae10-af539f50abc4-kube-api-access-qwdh2\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.368899 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1864cfd-61a1-4cb5-ae10-af539f50abc4-log-httpd\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") 
" pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.368938 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-config-data\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.368956 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.368977 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1864cfd-61a1-4cb5-ae10-af539f50abc4-run-httpd\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.369578 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1864cfd-61a1-4cb5-ae10-af539f50abc4-run-httpd\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.369814 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1864cfd-61a1-4cb5-ae10-af539f50abc4-log-httpd\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.372739 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.373024 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.374451 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-scripts\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.375625 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.384076 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1864cfd-61a1-4cb5-ae10-af539f50abc4-config-data\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 
04:47:34.393801 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwdh2\" (UniqueName: \"kubernetes.io/projected/f1864cfd-61a1-4cb5-ae10-af539f50abc4-kube-api-access-qwdh2\") pod \"ceilometer-0\" (UID: \"f1864cfd-61a1-4cb5-ae10-af539f50abc4\") " pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.574538 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.811140 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="52f182f2-1e00-4c5d-9101-dff47f7c7b85" containerName="manila-scheduler" containerID="cri-o://d9589fd1679892ecd011355a006f1e4c35bfe2c0a069c79bf8402ccdc209fb7b" gracePeriod=30 Oct 11 04:47:34 crc kubenswrapper[4798]: I1011 04:47:34.811330 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="52f182f2-1e00-4c5d-9101-dff47f7c7b85" containerName="probe" containerID="cri-o://965d3bde34ca8a3bf5bac23e703476a3357bdd8e6b3204be80e54f0960c02648" gracePeriod=30 Oct 11 04:47:35 crc kubenswrapper[4798]: I1011 04:47:35.128933 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 04:47:35 crc kubenswrapper[4798]: W1011 04:47:35.148569 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1864cfd_61a1_4cb5_ae10_af539f50abc4.slice/crio-98c9bb577f47b05ef1178588b6044ffb4045644fb2d2976198ff158e3670f111 WatchSource:0}: Error finding container 98c9bb577f47b05ef1178588b6044ffb4045644fb2d2976198ff158e3670f111: Status 404 returned error can't find the container with id 98c9bb577f47b05ef1178588b6044ffb4045644fb2d2976198ff158e3670f111 Oct 11 04:47:35 crc kubenswrapper[4798]: I1011 04:47:35.444218 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f089830-3555-4b43-bea2-68577bec1e15" path="/var/lib/kubelet/pods/2f089830-3555-4b43-bea2-68577bec1e15/volumes" Oct 11 04:47:35 crc kubenswrapper[4798]: I1011 04:47:35.827497 4798 generic.go:334] "Generic (PLEG): container finished" podID="52f182f2-1e00-4c5d-9101-dff47f7c7b85" containerID="965d3bde34ca8a3bf5bac23e703476a3357bdd8e6b3204be80e54f0960c02648" exitCode=0 Oct 11 04:47:35 crc kubenswrapper[4798]: I1011 04:47:35.827576 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"52f182f2-1e00-4c5d-9101-dff47f7c7b85","Type":"ContainerDied","Data":"965d3bde34ca8a3bf5bac23e703476a3357bdd8e6b3204be80e54f0960c02648"} Oct 11 04:47:35 crc kubenswrapper[4798]: I1011 04:47:35.830212 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1864cfd-61a1-4cb5-ae10-af539f50abc4","Type":"ContainerStarted","Data":"98c9bb577f47b05ef1178588b6044ffb4045644fb2d2976198ff158e3670f111"} Oct 11 04:47:36 crc kubenswrapper[4798]: I1011 04:47:36.842089 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1864cfd-61a1-4cb5-ae10-af539f50abc4","Type":"ContainerStarted","Data":"3f12cfa48f3b933968d55e619aca2c1ae9b573d8187362caab8032e6b924de86"} Oct 11 04:47:36 crc kubenswrapper[4798]: I1011 04:47:36.842622 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"f1864cfd-61a1-4cb5-ae10-af539f50abc4","Type":"ContainerStarted","Data":"0afe8700f245b4c202428a35f3f765a9184e18f5e6dd332e7dd999904836f263"} Oct 11 04:47:37 crc kubenswrapper[4798]: I1011 04:47:37.862241 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1864cfd-61a1-4cb5-ae10-af539f50abc4","Type":"ContainerStarted","Data":"8f501637473e8b1dff1dbfe9356c4dd8d8e084b4032e898a7b10068aae4a4a08"} Oct 11 04:47:38 crc kubenswrapper[4798]: I1011 04:47:38.883212 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1864cfd-61a1-4cb5-ae10-af539f50abc4","Type":"ContainerStarted","Data":"4505bba6a7cd02e8397e67ab7c856ab25806935a253d09a22cf85121546a3b15"} Oct 11 04:47:38 crc kubenswrapper[4798]: I1011 04:47:38.884764 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 11 04:47:39 crc kubenswrapper[4798]: I1011 04:47:39.898893 4798 generic.go:334] "Generic (PLEG): container finished" podID="52f182f2-1e00-4c5d-9101-dff47f7c7b85" containerID="d9589fd1679892ecd011355a006f1e4c35bfe2c0a069c79bf8402ccdc209fb7b" exitCode=0 Oct 11 04:47:39 crc kubenswrapper[4798]: I1011 04:47:39.900351 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"52f182f2-1e00-4c5d-9101-dff47f7c7b85","Type":"ContainerDied","Data":"d9589fd1679892ecd011355a006f1e4c35bfe2c0a069c79bf8402ccdc209fb7b"} Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.087015 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.110703 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.968574253 podStartE2EDuration="6.110682859s" podCreationTimestamp="2025-10-11 04:47:34 +0000 UTC" firstStartedPulling="2025-10-11 04:47:35.151989464 +0000 UTC m=+3150.488279180" lastFinishedPulling="2025-10-11 04:47:38.2940981 +0000 UTC m=+3153.630387786" observedRunningTime="2025-10-11 04:47:38.915693667 +0000 UTC m=+3154.251983383" watchObservedRunningTime="2025-10-11 04:47:40.110682859 +0000 UTC m=+3155.446972545" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.129871 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/52f182f2-1e00-4c5d-9101-dff47f7c7b85-etc-machine-id\") pod \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.130041 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-scripts\") pod \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.130218 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/52f182f2-1e00-4c5d-9101-dff47f7c7b85-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "52f182f2-1e00-4c5d-9101-dff47f7c7b85" (UID: "52f182f2-1e00-4c5d-9101-dff47f7c7b85"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.130294 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-config-data-custom\") pod \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.130583 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-config-data\") pod \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.130689 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xj85d\" (UniqueName: \"kubernetes.io/projected/52f182f2-1e00-4c5d-9101-dff47f7c7b85-kube-api-access-xj85d\") pod \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.130782 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-combined-ca-bundle\") pod \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\" (UID: \"52f182f2-1e00-4c5d-9101-dff47f7c7b85\") " Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.132158 4798 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/52f182f2-1e00-4c5d-9101-dff47f7c7b85-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.140036 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-scripts" (OuterVolumeSpecName: "scripts") pod "52f182f2-1e00-4c5d-9101-dff47f7c7b85" (UID: "52f182f2-1e00-4c5d-9101-dff47f7c7b85"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.140198 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52f182f2-1e00-4c5d-9101-dff47f7c7b85-kube-api-access-xj85d" (OuterVolumeSpecName: "kube-api-access-xj85d") pod "52f182f2-1e00-4c5d-9101-dff47f7c7b85" (UID: "52f182f2-1e00-4c5d-9101-dff47f7c7b85"). InnerVolumeSpecName "kube-api-access-xj85d". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.141817 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "52f182f2-1e00-4c5d-9101-dff47f7c7b85" (UID: "52f182f2-1e00-4c5d-9101-dff47f7c7b85"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.216322 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.221121 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "52f182f2-1e00-4c5d-9101-dff47f7c7b85" (UID: "52f182f2-1e00-4c5d-9101-dff47f7c7b85"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.242565 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xj85d\" (UniqueName: \"kubernetes.io/projected/52f182f2-1e00-4c5d-9101-dff47f7c7b85-kube-api-access-xj85d\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.242647 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.242659 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.242669 4798 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.322734 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-config-data" (OuterVolumeSpecName: "config-data") pod "52f182f2-1e00-4c5d-9101-dff47f7c7b85" (UID: "52f182f2-1e00-4c5d-9101-dff47f7c7b85"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.344691 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52f182f2-1e00-4c5d-9101-dff47f7c7b85-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.914402 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"52f182f2-1e00-4c5d-9101-dff47f7c7b85","Type":"ContainerDied","Data":"d5797e077cca131422c6cd601cb05f8aa054eaff2740607d3bb7024f640405cf"} Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.914783 4798 scope.go:117] "RemoveContainer" containerID="965d3bde34ca8a3bf5bac23e703476a3357bdd8e6b3204be80e54f0960c02648" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.914958 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.958032 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.969089 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-scheduler-0"] Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.984019 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Oct 11 04:47:40 crc kubenswrapper[4798]: E1011 04:47:40.984582 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52f182f2-1e00-4c5d-9101-dff47f7c7b85" containerName="manila-scheduler" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.984608 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="52f182f2-1e00-4c5d-9101-dff47f7c7b85" containerName="manila-scheduler" Oct 11 04:47:40 crc kubenswrapper[4798]: E1011 04:47:40.984640 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52f182f2-1e00-4c5d-9101-dff47f7c7b85" containerName="probe" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.984648 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="52f182f2-1e00-4c5d-9101-dff47f7c7b85" containerName="probe" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.984896 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="52f182f2-1e00-4c5d-9101-dff47f7c7b85" containerName="manila-scheduler" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.984943 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="52f182f2-1e00-4c5d-9101-dff47f7c7b85" containerName="probe" Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.986138 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0"
Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.988235 4798 scope.go:117] "RemoveContainer" containerID="d9589fd1679892ecd011355a006f1e4c35bfe2c0a069c79bf8402ccdc209fb7b"
Oct 11 04:47:40 crc kubenswrapper[4798]: I1011 04:47:40.989233 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.026755 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"]
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.063241 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.063326 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.063375 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxkcb\" (UniqueName: \"kubernetes.io/projected/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-kube-api-access-fxkcb\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.063502 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-config-data\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.063540 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.063566 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-scripts\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.166556 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-config-data\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.166652 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.166694 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-scripts\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.166788 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.166824 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.166853 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxkcb\" (UniqueName: \"kubernetes.io/projected/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-kube-api-access-fxkcb\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.167911 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.173937 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-scripts\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.180639 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.183520 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxkcb\" (UniqueName: \"kubernetes.io/projected/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-kube-api-access-fxkcb\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.186562 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-config-data\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.186829 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a3790b4-be7a-434f-90c1-3c0b6623b2a5-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"8a3790b4-be7a-434f-90c1-3c0b6623b2a5\") " pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.343305 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.454529 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52f182f2-1e00-4c5d-9101-dff47f7c7b85" path="/var/lib/kubelet/pods/52f182f2-1e00-4c5d-9101-dff47f7c7b85/volumes"
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.863588 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"]
Oct 11 04:47:41 crc kubenswrapper[4798]: I1011 04:47:41.927729 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"8a3790b4-be7a-434f-90c1-3c0b6623b2a5","Type":"ContainerStarted","Data":"8aa2525ab56b4ca2332b26aa63ffb1be04fd02148d0a898c01efa1ef96c814a7"}
Oct 11 04:47:42 crc kubenswrapper[4798]: I1011 04:47:42.941766 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"8a3790b4-be7a-434f-90c1-3c0b6623b2a5","Type":"ContainerStarted","Data":"c183d3a7eac5ecb545c067a40ee6faedb84b35d18933828c714374e86f6409dc"}
Oct 11 04:47:42 crc kubenswrapper[4798]: I1011 04:47:42.942260 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"8a3790b4-be7a-434f-90c1-3c0b6623b2a5","Type":"ContainerStarted","Data":"1c274cf5597ea9f58f8030b6112783f4a1922d56feaf8309194f6d49adbc738d"}
Oct 11 04:47:42 crc kubenswrapper[4798]: I1011 04:47:42.963301 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=2.9632548659999998 podStartE2EDuration="2.963254866s" podCreationTimestamp="2025-10-11 04:47:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:47:42.961075306 +0000 UTC m=+3158.297364992" watchObservedRunningTime="2025-10-11 04:47:42.963254866 +0000 UTC m=+3158.299544552"
Oct 11 04:47:43 crc kubenswrapper[4798]: I1011 04:47:43.137868 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-78d45886d-tlzfl" podUID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.242:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.242:8443: connect: connection refused"
Oct 11 04:47:43 crc kubenswrapper[4798]: I1011 04:47:43.138014 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-78d45886d-tlzfl"
Oct 11 04:47:43 crc kubenswrapper[4798]: I1011 04:47:43.720559 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0"
Oct 11 04:47:43 crc kubenswrapper[4798]: I1011 04:47:43.819317 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"]
Oct 11 04:47:43 crc kubenswrapper[4798]: I1011 04:47:43.952738 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="3aa8fa74-0fd3-468d-935b-a819f2383dfb" containerName="manila-share" containerID="cri-o://a2bcddff7277f6c827cc95a984acddb98118e5a3debaeb243aa493fc2447e1c8" gracePeriod=30
Oct 11 04:47:43 crc kubenswrapper[4798]: I1011 04:47:43.952893 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="3aa8fa74-0fd3-468d-935b-a819f2383dfb" containerName="probe" containerID="cri-o://cddbb04d4f074bea6fc46f11006ddccdc4411586963a24a0a556330a4164b0bc" gracePeriod=30
Oct 11 04:47:44 crc kubenswrapper[4798]: I1011 04:47:44.965573 4798 generic.go:334] "Generic (PLEG): container finished" podID="3aa8fa74-0fd3-468d-935b-a819f2383dfb" containerID="cddbb04d4f074bea6fc46f11006ddccdc4411586963a24a0a556330a4164b0bc" exitCode=0
Oct 11 04:47:44 crc kubenswrapper[4798]: I1011 04:47:44.966115 4798 generic.go:334] "Generic (PLEG): container finished" podID="3aa8fa74-0fd3-468d-935b-a819f2383dfb" containerID="a2bcddff7277f6c827cc95a984acddb98118e5a3debaeb243aa493fc2447e1c8" exitCode=1
Oct 11 04:47:44 crc kubenswrapper[4798]: I1011 04:47:44.965712 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"3aa8fa74-0fd3-468d-935b-a819f2383dfb","Type":"ContainerDied","Data":"cddbb04d4f074bea6fc46f11006ddccdc4411586963a24a0a556330a4164b0bc"}
Oct 11 04:47:44 crc kubenswrapper[4798]: I1011 04:47:44.966168 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"3aa8fa74-0fd3-468d-935b-a819f2383dfb","Type":"ContainerDied","Data":"a2bcddff7277f6c827cc95a984acddb98118e5a3debaeb243aa493fc2447e1c8"}
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.141359 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0"
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.190498 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3aa8fa74-0fd3-468d-935b-a819f2383dfb-etc-machine-id\") pod \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") "
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.190608 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-config-data-custom\") pod \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") "
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.190713 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-config-data\") pod \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") "
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.190937 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-scripts\") pod \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") "
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.191050 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rx5r5\" (UniqueName: \"kubernetes.io/projected/3aa8fa74-0fd3-468d-935b-a819f2383dfb-kube-api-access-rx5r5\") pod \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") "
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.191094 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/3aa8fa74-0fd3-468d-935b-a819f2383dfb-var-lib-manila\") pod \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") "
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.191160 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-combined-ca-bundle\") pod \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") "
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.191249 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3aa8fa74-0fd3-468d-935b-a819f2383dfb-ceph\") pod \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\" (UID: \"3aa8fa74-0fd3-468d-935b-a819f2383dfb\") "
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.193777 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3aa8fa74-0fd3-468d-935b-a819f2383dfb-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "3aa8fa74-0fd3-468d-935b-a819f2383dfb" (UID: "3aa8fa74-0fd3-468d-935b-a819f2383dfb"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.194698 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3aa8fa74-0fd3-468d-935b-a819f2383dfb-var-lib-manila" (OuterVolumeSpecName: "var-lib-manila") pod "3aa8fa74-0fd3-468d-935b-a819f2383dfb" (UID: "3aa8fa74-0fd3-468d-935b-a819f2383dfb"). InnerVolumeSpecName "var-lib-manila". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.210011 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3aa8fa74-0fd3-468d-935b-a819f2383dfb-ceph" (OuterVolumeSpecName: "ceph") pod "3aa8fa74-0fd3-468d-935b-a819f2383dfb" (UID: "3aa8fa74-0fd3-468d-935b-a819f2383dfb"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.210061 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-scripts" (OuterVolumeSpecName: "scripts") pod "3aa8fa74-0fd3-468d-935b-a819f2383dfb" (UID: "3aa8fa74-0fd3-468d-935b-a819f2383dfb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.216692 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3aa8fa74-0fd3-468d-935b-a819f2383dfb-kube-api-access-rx5r5" (OuterVolumeSpecName: "kube-api-access-rx5r5") pod "3aa8fa74-0fd3-468d-935b-a819f2383dfb" (UID: "3aa8fa74-0fd3-468d-935b-a819f2383dfb"). InnerVolumeSpecName "kube-api-access-rx5r5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.224726 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3aa8fa74-0fd3-468d-935b-a819f2383dfb" (UID: "3aa8fa74-0fd3-468d-935b-a819f2383dfb"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.257565 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3aa8fa74-0fd3-468d-935b-a819f2383dfb" (UID: "3aa8fa74-0fd3-468d-935b-a819f2383dfb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.294432 4798 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3aa8fa74-0fd3-468d-935b-a819f2383dfb-etc-machine-id\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.294764 4798 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-config-data-custom\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.294839 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.294892 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rx5r5\" (UniqueName: \"kubernetes.io/projected/3aa8fa74-0fd3-468d-935b-a819f2383dfb-kube-api-access-rx5r5\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.294969 4798 reconciler_common.go:293] "Volume detached for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/3aa8fa74-0fd3-468d-935b-a819f2383dfb-var-lib-manila\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.295028 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.295086 4798 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3aa8fa74-0fd3-468d-935b-a819f2383dfb-ceph\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.329662 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-config-data" (OuterVolumeSpecName: "config-data") pod "3aa8fa74-0fd3-468d-935b-a819f2383dfb" (UID: "3aa8fa74-0fd3-468d-935b-a819f2383dfb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.397362 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3aa8fa74-0fd3-468d-935b-a819f2383dfb-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.994365 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"3aa8fa74-0fd3-468d-935b-a819f2383dfb","Type":"ContainerDied","Data":"600b943243dd68eeb305b6de0179681720e9e7b44ebc8407391aff530fa41e2c"}
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.994905 4798 scope.go:117] "RemoveContainer" containerID="cddbb04d4f074bea6fc46f11006ddccdc4411586963a24a0a556330a4164b0bc"
Oct 11 04:47:45 crc kubenswrapper[4798]: I1011 04:47:45.994533 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.024612 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"]
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.036651 4798 scope.go:117] "RemoveContainer" containerID="a2bcddff7277f6c827cc95a984acddb98118e5a3debaeb243aa493fc2447e1c8"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.038455 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-share-share1-0"]
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.081216 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"]
Oct 11 04:47:46 crc kubenswrapper[4798]: E1011 04:47:46.081767 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3aa8fa74-0fd3-468d-935b-a819f2383dfb" containerName="probe"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.081790 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3aa8fa74-0fd3-468d-935b-a819f2383dfb" containerName="probe"
Oct 11 04:47:46 crc kubenswrapper[4798]: E1011 04:47:46.081815 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3aa8fa74-0fd3-468d-935b-a819f2383dfb" containerName="manila-share"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.081822 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3aa8fa74-0fd3-468d-935b-a819f2383dfb" containerName="manila-share"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.082031 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3aa8fa74-0fd3-468d-935b-a819f2383dfb" containerName="probe"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.082058 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3aa8fa74-0fd3-468d-935b-a819f2383dfb" containerName="manila-share"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.083194 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.089831 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.112999 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"]
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.115838 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.115878 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.115994 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.116036 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.116067 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-scripts\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.116182 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxg2m\" (UniqueName: \"kubernetes.io/projected/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-kube-api-access-xxg2m\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.116237 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-ceph\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.116350 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-config-data\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.218831 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxg2m\" (UniqueName: \"kubernetes.io/projected/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-kube-api-access-xxg2m\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.218907 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-ceph\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.218933 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-config-data\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.218978 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.219006 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.219065 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.219087 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.219112 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-scripts\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.220126 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.220250 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.224794 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.225559 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-ceph\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.226076 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.226204 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-scripts\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.231461 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-config-data\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.238704 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxg2m\" (UniqueName: \"kubernetes.io/projected/49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582-kube-api-access-xxg2m\") pod \"manila-share-share1-0\" (UID: \"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582\") " pod="openstack/manila-share-share1-0"
Oct 11 04:47:46 crc kubenswrapper[4798]: I1011 04:47:46.411750 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0"
Oct 11 04:47:47 crc kubenswrapper[4798]: I1011 04:47:47.093082 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"]
Oct 11 04:47:47 crc kubenswrapper[4798]: I1011 04:47:47.441265 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3aa8fa74-0fd3-468d-935b-a819f2383dfb" path="/var/lib/kubelet/pods/3aa8fa74-0fd3-468d-935b-a819f2383dfb/volumes"
Oct 11 04:47:47 crc kubenswrapper[4798]: I1011 04:47:47.503818 4798 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod778270e6-b796-4692-b341-fb2091a1576e"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod778270e6-b796-4692-b341-fb2091a1576e] : Timed out while waiting for systemd to remove kubepods-besteffort-pod778270e6_b796_4692_b341_fb2091a1576e.slice"
Oct 11 04:47:47 crc kubenswrapper[4798]: E1011 04:47:47.503886 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod778270e6-b796-4692-b341-fb2091a1576e] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod778270e6-b796-4692-b341-fb2091a1576e] : Timed out while waiting for systemd to remove kubepods-besteffort-pod778270e6_b796_4692_b341_fb2091a1576e.slice" pod="openstack/horizon-bf5b8bcbc-wjw4p" podUID="778270e6-b796-4692-b341-fb2091a1576e"
Oct 11 04:47:48 crc kubenswrapper[4798]: I1011 04:47:48.025582 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582","Type":"ContainerStarted","Data":"44a4bac946e4e2d624ba7d490db5f1e9a368f3685e788bbc67c29b9ca9329846"}
Oct 11 04:47:48 crc kubenswrapper[4798]: I1011 04:47:48.026116 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582","Type":"ContainerStarted","Data":"3e870a86008a486d5b5f3b61aab4b6deb2406c5b5212efaca8918d3a389cf548"}
Oct 11 04:47:48 crc kubenswrapper[4798]: I1011 04:47:48.025640 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-bf5b8bcbc-wjw4p"
Oct 11 04:47:48 crc kubenswrapper[4798]: I1011 04:47:48.081850 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-bf5b8bcbc-wjw4p"]
Oct 11 04:47:48 crc kubenswrapper[4798]: I1011 04:47:48.092168 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-bf5b8bcbc-wjw4p"]
Oct 11 04:47:48 crc kubenswrapper[4798]: I1011 04:47:48.971891 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78d45886d-tlzfl"
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.001593 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tj6fh\" (UniqueName: \"kubernetes.io/projected/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-kube-api-access-tj6fh\") pod \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") "
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.001679 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-scripts\") pod \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") "
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.001765 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-horizon-secret-key\") pod \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") "
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.001800 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-logs\") pod \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") "
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.001853 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-config-data\") pod \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") "
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.002001 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-combined-ca-bundle\") pod \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") "
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.002035 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-horizon-tls-certs\") pod \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\" (UID: \"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7\") "
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.002665 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-logs" (OuterVolumeSpecName: "logs") pod "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" (UID: "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.003069 4798 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-logs\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.017749 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-kube-api-access-tj6fh" (OuterVolumeSpecName: "kube-api-access-tj6fh") pod "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" (UID: "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7"). InnerVolumeSpecName "kube-api-access-tj6fh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.045085 4798 generic.go:334] "Generic (PLEG): container finished" podID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerID="e5a0bf3052f9a7a689c723780253fef10a5e615d7ab1c128a44088070fa7c599" exitCode=137
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.045540 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78d45886d-tlzfl" event={"ID":"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7","Type":"ContainerDied","Data":"e5a0bf3052f9a7a689c723780253fef10a5e615d7ab1c128a44088070fa7c599"}
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.045608 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78d45886d-tlzfl"
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.045700 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78d45886d-tlzfl" event={"ID":"0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7","Type":"ContainerDied","Data":"800e9474268685164775ff59281f7153612fd0e947506cd883a82d994b3ae84b"}
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.045773 4798 scope.go:117] "RemoveContainer" containerID="6e79ada2ce3c5b65f6f244fb7a98a83d07af92873ba6a0cf203af681fb65f44d"
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.045321 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" (UID: "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.068614 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" (UID: "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.071626 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582","Type":"ContainerStarted","Data":"07f2a6fd0af00fe2f8347a247dae75f78fcc2e32c87ce82c1128ceb13ee87a83"}
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.080013 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-scripts" (OuterVolumeSpecName: "scripts") pod "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" (UID: "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.087073 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-config-data" (OuterVolumeSpecName: "config-data") pod "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" (UID: "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.105633 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tj6fh\" (UniqueName: \"kubernetes.io/projected/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-kube-api-access-tj6fh\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.105673 4798 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.105687 4798 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.105697 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.105708 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.108357 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.108335742 podStartE2EDuration="3.108335742s" podCreationTimestamp="2025-10-11 04:47:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 04:47:49.102688829 +0000 UTC m=+3164.438978515" watchObservedRunningTime="2025-10-11 04:47:49.108335742 +0000 UTC m=+3164.444625428"
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.130099 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" (UID: "0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.207749 4798 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7-horizon-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.323949 4798 scope.go:117] "RemoveContainer" containerID="e5a0bf3052f9a7a689c723780253fef10a5e615d7ab1c128a44088070fa7c599"
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.382429 4798 scope.go:117] "RemoveContainer" containerID="6e79ada2ce3c5b65f6f244fb7a98a83d07af92873ba6a0cf203af681fb65f44d"
Oct 11 04:47:49 crc kubenswrapper[4798]: E1011 04:47:49.384681 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e79ada2ce3c5b65f6f244fb7a98a83d07af92873ba6a0cf203af681fb65f44d\": container with ID starting with 6e79ada2ce3c5b65f6f244fb7a98a83d07af92873ba6a0cf203af681fb65f44d not found: ID does not exist" containerID="6e79ada2ce3c5b65f6f244fb7a98a83d07af92873ba6a0cf203af681fb65f44d"
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.384764 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e79ada2ce3c5b65f6f244fb7a98a83d07af92873ba6a0cf203af681fb65f44d"} err="failed to get container status \"6e79ada2ce3c5b65f6f244fb7a98a83d07af92873ba6a0cf203af681fb65f44d\": rpc error: code = NotFound desc = could not find container \"6e79ada2ce3c5b65f6f244fb7a98a83d07af92873ba6a0cf203af681fb65f44d\": container with ID starting with 6e79ada2ce3c5b65f6f244fb7a98a83d07af92873ba6a0cf203af681fb65f44d not found: ID does not exist"
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.384811 4798 scope.go:117] "RemoveContainer" containerID="e5a0bf3052f9a7a689c723780253fef10a5e615d7ab1c128a44088070fa7c599"
Oct 11 04:47:49 crc kubenswrapper[4798]: E1011 04:47:49.385540 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5a0bf3052f9a7a689c723780253fef10a5e615d7ab1c128a44088070fa7c599\": container with ID starting with e5a0bf3052f9a7a689c723780253fef10a5e615d7ab1c128a44088070fa7c599 not found: ID does not exist" containerID="e5a0bf3052f9a7a689c723780253fef10a5e615d7ab1c128a44088070fa7c599"
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.385616 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5a0bf3052f9a7a689c723780253fef10a5e615d7ab1c128a44088070fa7c599"} err="failed to get container status \"e5a0bf3052f9a7a689c723780253fef10a5e615d7ab1c128a44088070fa7c599\": rpc error: code = NotFound desc = could not find container \"e5a0bf3052f9a7a689c723780253fef10a5e615d7ab1c128a44088070fa7c599\": container with ID starting with e5a0bf3052f9a7a689c723780253fef10a5e615d7ab1c128a44088070fa7c599 not found: ID does not exist"
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.441697 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="778270e6-b796-4692-b341-fb2091a1576e" path="/var/lib/kubelet/pods/778270e6-b796-4692-b341-fb2091a1576e/volumes"
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.442785 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-78d45886d-tlzfl"]
Oct 11 04:47:49 crc kubenswrapper[4798]: I1011 04:47:49.444365 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-78d45886d-tlzfl"]
Oct 11 04:47:51 crc kubenswrapper[4798]: I1011 04:47:51.344062 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0"
Oct 11 04:47:51 crc kubenswrapper[4798]: I1011 04:47:51.448429 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" path="/var/lib/kubelet/pods/0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7/volumes"
Oct 11 04:47:56 crc kubenswrapper[4798]: I1011 04:47:56.412166 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0"
Oct 11 04:48:02 crc kubenswrapper[4798]: I1011 04:48:02.941708 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0"
Oct 11 04:48:04 crc kubenswrapper[4798]: I1011 04:48:04.590533 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Oct 11 04:48:08 crc kubenswrapper[4798]: I1011 04:48:08.217343 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.493884 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"]
Oct 11 04:49:15 crc kubenswrapper[4798]: E1011 04:49:15.495418 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerName="horizon"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.495444 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerName="horizon"
Oct 11 04:49:15 crc kubenswrapper[4798]: E1011 04:49:15.495514 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerName="horizon-log"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.495527 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerName="horizon-log"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.495908 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerName="horizon-log"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.495944 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c09ff0d-de2b-4e1d-b39f-f467c8b03fb7" containerName="horizon"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.497606 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.505799 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.530177 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.530248 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5wwt4"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.530584 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.531968 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.629543 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.630081 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.630145 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77cd5\" (UniqueName: \"kubernetes.io/projected/b3b2c8d8-3245-48a3-be3a-099046cf7258-kube-api-access-77cd5\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.630178 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b3b2c8d8-3245-48a3-be3a-099046cf7258-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.630207 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.630270 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3b2c8d8-3245-48a3-be3a-099046cf7258-config-data\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.630322 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.630349 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b3b2c8d8-3245-48a3-be3a-099046cf7258-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.630373 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b3b2c8d8-3245-48a3-be3a-099046cf7258-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.732872 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3b2c8d8-3245-48a3-be3a-099046cf7258-config-data\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.732964 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.732995 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b3b2c8d8-3245-48a3-be3a-099046cf7258-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.733022 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b3b2c8d8-3245-48a3-be3a-099046cf7258-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.733054 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.733097 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.733144 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77cd5\" (UniqueName: \"kubernetes.io/projected/b3b2c8d8-3245-48a3-be3a-099046cf7258-kube-api-access-77cd5\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.733170 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b3b2c8d8-3245-48a3-be3a-099046cf7258-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.733204 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.733673 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.734744 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b3b2c8d8-3245-48a3-be3a-099046cf7258-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.734993 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b3b2c8d8-3245-48a3-be3a-099046cf7258-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.735335 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b3b2c8d8-3245-48a3-be3a-099046cf7258-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.735763 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3b2c8d8-3245-48a3-be3a-099046cf7258-config-data\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.742699 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.743101 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.743705 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.768017 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.774452 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77cd5\" (UniqueName: \"kubernetes.io/projected/b3b2c8d8-3245-48a3-be3a-099046cf7258-kube-api-access-77cd5\") pod \"tempest-tests-tempest\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " pod="openstack/tempest-tests-tempest"
Oct 11 04:49:15 crc kubenswrapper[4798]: I1011 04:49:15.868015 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Oct 11 04:49:16 crc kubenswrapper[4798]: I1011 04:49:16.200758 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Oct 11 04:49:17 crc kubenswrapper[4798]: I1011 04:49:17.190219 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"b3b2c8d8-3245-48a3-be3a-099046cf7258","Type":"ContainerStarted","Data":"c2019c07467b5462271a938e008560e2bb76709f3638f79fdcbf99b755452c32"}
Oct 11 04:49:46 crc kubenswrapper[4798]: E1011 04:49:46.480957 4798 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified"
Oct 11 04:49:46 crc kubenswrapper[4798]: E1011 04:49:46.482489 4798 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-77cd5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(b3b2c8d8-3245-48a3-be3a-099046cf7258): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 11 04:49:46 crc kubenswrapper[4798]: E1011 04:49:46.483805 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="b3b2c8d8-3245-48a3-be3a-099046cf7258"
Oct 11 04:49:46 crc kubenswrapper[4798]: E1011 04:49:46.544052 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="b3b2c8d8-3245-48a3-be3a-099046cf7258"
Oct 11 04:49:57 crc kubenswrapper[4798]: I1011 04:49:57.139192 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 04:49:57 crc kubenswrapper[4798]: I1011 04:49:57.140356 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 04:50:02 crc kubenswrapper[4798]: I1011 04:50:02.826460 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"b3b2c8d8-3245-48a3-be3a-099046cf7258","Type":"ContainerStarted","Data":"7f8f349b21d5c10e3bac64ece6b39de4df0bb89dc8edb59a0a0c95b9f7c0116e"}
Oct 11 04:50:27 crc kubenswrapper[4798]: I1011 04:50:27.138252 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 04:50:27 crc kubenswrapper[4798]: I1011 04:50:27.139096 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 04:50:57 crc kubenswrapper[4798]: I1011 04:50:57.138881 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 04:50:57 crc kubenswrapper[4798]: I1011 04:50:57.139790 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 04:50:57 crc kubenswrapper[4798]: I1011 04:50:57.139862 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2"
Oct 11 04:50:57 crc kubenswrapper[4798]: I1011 04:50:57.143360 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d5e0dca4104eeca7c9a76b11cb7ae2acd478e963ddf4e0a9647a5803b7ebfc90"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 11 04:50:57 crc kubenswrapper[4798]: I1011 04:50:57.143556 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://d5e0dca4104eeca7c9a76b11cb7ae2acd478e963ddf4e0a9647a5803b7ebfc90" gracePeriod=600
Oct 11 04:50:57 crc kubenswrapper[4798]: I1011 04:50:57.528985 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerID="d5e0dca4104eeca7c9a76b11cb7ae2acd478e963ddf4e0a9647a5803b7ebfc90" exitCode=0
Oct 11 04:50:57 crc kubenswrapper[4798]: I1011 04:50:57.529059 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"d5e0dca4104eeca7c9a76b11cb7ae2acd478e963ddf4e0a9647a5803b7ebfc90"}
Oct 11 04:50:57 crc kubenswrapper[4798]: I1011 04:50:57.529623 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"}
Oct 11 04:50:57 crc kubenswrapper[4798]: I1011 04:50:57.529651 4798 scope.go:117] "RemoveContainer" containerID="999be6c75e4c41e3d1de7a3a50ee34084c0f4ead3f5f5fdebf32b1d0054c6b5f"
Oct 11 04:50:57 crc kubenswrapper[4798]: I1011 04:50:57.554340 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=58.819965553 podStartE2EDuration="1m43.554314053s" podCreationTimestamp="2025-10-11 04:49:14 +0000 UTC" firstStartedPulling="2025-10-11 04:49:16.219603062 +0000 UTC m=+3251.555892758" lastFinishedPulling="2025-10-11 04:50:00.953951572 +0000 UTC m=+3296.290241258" observedRunningTime="2025-10-11 04:50:02.848813719 +0000 UTC m=+3298.185103415" watchObservedRunningTime="2025-10-11 04:50:57.554314053 +0000 UTC m=+3352.890603739"
Oct 11 04:51:09 crc kubenswrapper[4798]: I1011 04:51:09.534503 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cstsv"]
Oct 11 04:51:09 crc kubenswrapper[4798]: I1011 04:51:09.539117 4798 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:09 crc kubenswrapper[4798]: I1011 04:51:09.558859 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cstsv"] Oct 11 04:51:09 crc kubenswrapper[4798]: I1011 04:51:09.701009 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5lb6\" (UniqueName: \"kubernetes.io/projected/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-kube-api-access-z5lb6\") pod \"redhat-operators-cstsv\" (UID: \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\") " pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:09 crc kubenswrapper[4798]: I1011 04:51:09.701197 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-utilities\") pod \"redhat-operators-cstsv\" (UID: \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\") " pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:09 crc kubenswrapper[4798]: I1011 04:51:09.701350 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-catalog-content\") pod \"redhat-operators-cstsv\" (UID: \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\") " pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:09 crc kubenswrapper[4798]: I1011 04:51:09.803415 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-catalog-content\") pod \"redhat-operators-cstsv\" (UID: \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\") " pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:09 crc kubenswrapper[4798]: I1011 04:51:09.803606 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5lb6\" (UniqueName: \"kubernetes.io/projected/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-kube-api-access-z5lb6\") pod \"redhat-operators-cstsv\" (UID: \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\") " pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:09 crc kubenswrapper[4798]: I1011 04:51:09.803712 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-utilities\") pod \"redhat-operators-cstsv\" (UID: \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\") " pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:09 crc kubenswrapper[4798]: I1011 04:51:09.804421 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-catalog-content\") pod \"redhat-operators-cstsv\" (UID: \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\") " pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:09 crc kubenswrapper[4798]: I1011 04:51:09.804830 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-utilities\") pod \"redhat-operators-cstsv\" (UID: \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\") " pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:09 crc kubenswrapper[4798]: I1011 04:51:09.826047 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-z5lb6\" (UniqueName: \"kubernetes.io/projected/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-kube-api-access-z5lb6\") pod \"redhat-operators-cstsv\" (UID: \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\") " pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:09 crc kubenswrapper[4798]: I1011 04:51:09.911320 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:10 crc kubenswrapper[4798]: I1011 04:51:10.391947 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cstsv"] Oct 11 04:51:10 crc kubenswrapper[4798]: I1011 04:51:10.694764 4798 generic.go:334] "Generic (PLEG): container finished" podID="27f9ad6b-0a6a-4e43-b157-840e1ac08b96" containerID="fd99212e831c35561d84f91e14f12482db1035bc8a48e2d8164c564ad674e6ac" exitCode=0 Oct 11 04:51:10 crc kubenswrapper[4798]: I1011 04:51:10.695129 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cstsv" event={"ID":"27f9ad6b-0a6a-4e43-b157-840e1ac08b96","Type":"ContainerDied","Data":"fd99212e831c35561d84f91e14f12482db1035bc8a48e2d8164c564ad674e6ac"} Oct 11 04:51:10 crc kubenswrapper[4798]: I1011 04:51:10.695176 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cstsv" event={"ID":"27f9ad6b-0a6a-4e43-b157-840e1ac08b96","Type":"ContainerStarted","Data":"d5c56f466e7117fa3e85ff7928980c8afb48fb3387ebc30a5dc0bf3a7c4d8bff"} Oct 11 04:51:11 crc kubenswrapper[4798]: I1011 04:51:11.755896 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cstsv" event={"ID":"27f9ad6b-0a6a-4e43-b157-840e1ac08b96","Type":"ContainerStarted","Data":"f1a13d27329532c8a289c88b941b95efb05679eb0f2793b004d0c7ad87953a13"} Oct 11 04:51:12 crc kubenswrapper[4798]: I1011 04:51:12.770186 4798 generic.go:334] "Generic (PLEG): container finished" podID="27f9ad6b-0a6a-4e43-b157-840e1ac08b96" containerID="f1a13d27329532c8a289c88b941b95efb05679eb0f2793b004d0c7ad87953a13" exitCode=0 Oct 11 04:51:12 crc kubenswrapper[4798]: I1011 04:51:12.770298 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cstsv" event={"ID":"27f9ad6b-0a6a-4e43-b157-840e1ac08b96","Type":"ContainerDied","Data":"f1a13d27329532c8a289c88b941b95efb05679eb0f2793b004d0c7ad87953a13"} Oct 11 04:51:13 crc kubenswrapper[4798]: I1011 04:51:13.788463 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cstsv" event={"ID":"27f9ad6b-0a6a-4e43-b157-840e1ac08b96","Type":"ContainerStarted","Data":"4ade9b78f432bc09d4889f009c2bfa523c11ce3c4bb66fbf6fd4a4387fcb731c"} Oct 11 04:51:13 crc kubenswrapper[4798]: I1011 04:51:13.835589 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cstsv" podStartSLOduration=2.2520873679999998 podStartE2EDuration="4.835563186s" podCreationTimestamp="2025-10-11 04:51:09 +0000 UTC" firstStartedPulling="2025-10-11 04:51:10.698317185 +0000 UTC m=+3366.034606861" lastFinishedPulling="2025-10-11 04:51:13.281792993 +0000 UTC m=+3368.618082679" observedRunningTime="2025-10-11 04:51:13.816180511 +0000 UTC m=+3369.152470197" watchObservedRunningTime="2025-10-11 04:51:13.835563186 +0000 UTC m=+3369.171852872" Oct 11 04:51:19 crc kubenswrapper[4798]: I1011 04:51:19.912557 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:19 crc kubenswrapper[4798]: I1011 04:51:19.913537 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:20 crc kubenswrapper[4798]: I1011 04:51:20.008333 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:20 crc kubenswrapper[4798]: I1011 04:51:20.954617 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:21 crc kubenswrapper[4798]: I1011 04:51:21.022827 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cstsv"] Oct 11 04:51:22 crc kubenswrapper[4798]: I1011 04:51:22.896746 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cstsv" podUID="27f9ad6b-0a6a-4e43-b157-840e1ac08b96" containerName="registry-server" containerID="cri-o://4ade9b78f432bc09d4889f009c2bfa523c11ce3c4bb66fbf6fd4a4387fcb731c" gracePeriod=2 Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.500112 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.523694 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5lb6\" (UniqueName: \"kubernetes.io/projected/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-kube-api-access-z5lb6\") pod \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\" (UID: \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\") " Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.523795 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-utilities\") pod \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\" (UID: \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\") " Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.524000 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-catalog-content\") pod \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\" (UID: \"27f9ad6b-0a6a-4e43-b157-840e1ac08b96\") " Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.535579 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-utilities" (OuterVolumeSpecName: "utilities") pod "27f9ad6b-0a6a-4e43-b157-840e1ac08b96" (UID: "27f9ad6b-0a6a-4e43-b157-840e1ac08b96"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.555043 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-kube-api-access-z5lb6" (OuterVolumeSpecName: "kube-api-access-z5lb6") pod "27f9ad6b-0a6a-4e43-b157-840e1ac08b96" (UID: "27f9ad6b-0a6a-4e43-b157-840e1ac08b96"). InnerVolumeSpecName "kube-api-access-z5lb6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.627229 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5lb6\" (UniqueName: \"kubernetes.io/projected/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-kube-api-access-z5lb6\") on node \"crc\" DevicePath \"\"" Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.627266 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.645306 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "27f9ad6b-0a6a-4e43-b157-840e1ac08b96" (UID: "27f9ad6b-0a6a-4e43-b157-840e1ac08b96"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.730219 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27f9ad6b-0a6a-4e43-b157-840e1ac08b96-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.909308 4798 generic.go:334] "Generic (PLEG): container finished" podID="27f9ad6b-0a6a-4e43-b157-840e1ac08b96" containerID="4ade9b78f432bc09d4889f009c2bfa523c11ce3c4bb66fbf6fd4a4387fcb731c" exitCode=0 Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.909428 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cstsv" event={"ID":"27f9ad6b-0a6a-4e43-b157-840e1ac08b96","Type":"ContainerDied","Data":"4ade9b78f432bc09d4889f009c2bfa523c11ce3c4bb66fbf6fd4a4387fcb731c"} Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.909446 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cstsv" Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.909510 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cstsv" event={"ID":"27f9ad6b-0a6a-4e43-b157-840e1ac08b96","Type":"ContainerDied","Data":"d5c56f466e7117fa3e85ff7928980c8afb48fb3387ebc30a5dc0bf3a7c4d8bff"} Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.909561 4798 scope.go:117] "RemoveContainer" containerID="4ade9b78f432bc09d4889f009c2bfa523c11ce3c4bb66fbf6fd4a4387fcb731c" Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.971411 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cstsv"] Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.981032 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cstsv"] Oct 11 04:51:23 crc kubenswrapper[4798]: I1011 04:51:23.983019 4798 scope.go:117] "RemoveContainer" containerID="f1a13d27329532c8a289c88b941b95efb05679eb0f2793b004d0c7ad87953a13" Oct 11 04:51:24 crc kubenswrapper[4798]: I1011 04:51:24.023714 4798 scope.go:117] "RemoveContainer" containerID="fd99212e831c35561d84f91e14f12482db1035bc8a48e2d8164c564ad674e6ac" Oct 11 04:51:24 crc kubenswrapper[4798]: I1011 04:51:24.091170 4798 scope.go:117] "RemoveContainer" containerID="4ade9b78f432bc09d4889f009c2bfa523c11ce3c4bb66fbf6fd4a4387fcb731c" Oct 11 04:51:24 crc kubenswrapper[4798]: E1011 04:51:24.091862 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ade9b78f432bc09d4889f009c2bfa523c11ce3c4bb66fbf6fd4a4387fcb731c\": container with ID starting with 4ade9b78f432bc09d4889f009c2bfa523c11ce3c4bb66fbf6fd4a4387fcb731c not found: ID does not exist" containerID="4ade9b78f432bc09d4889f009c2bfa523c11ce3c4bb66fbf6fd4a4387fcb731c" Oct 11 04:51:24 crc kubenswrapper[4798]: I1011 04:51:24.091899 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ade9b78f432bc09d4889f009c2bfa523c11ce3c4bb66fbf6fd4a4387fcb731c"} err="failed to get container status \"4ade9b78f432bc09d4889f009c2bfa523c11ce3c4bb66fbf6fd4a4387fcb731c\": rpc error: code = NotFound desc = could not find container \"4ade9b78f432bc09d4889f009c2bfa523c11ce3c4bb66fbf6fd4a4387fcb731c\": container with ID starting with 4ade9b78f432bc09d4889f009c2bfa523c11ce3c4bb66fbf6fd4a4387fcb731c not found: ID does not exist" Oct 11 04:51:24 crc kubenswrapper[4798]: I1011 04:51:24.091925 4798 scope.go:117] "RemoveContainer" containerID="f1a13d27329532c8a289c88b941b95efb05679eb0f2793b004d0c7ad87953a13" Oct 11 04:51:24 crc kubenswrapper[4798]: E1011 04:51:24.092659 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1a13d27329532c8a289c88b941b95efb05679eb0f2793b004d0c7ad87953a13\": container with ID starting with f1a13d27329532c8a289c88b941b95efb05679eb0f2793b004d0c7ad87953a13 not found: ID does not exist" containerID="f1a13d27329532c8a289c88b941b95efb05679eb0f2793b004d0c7ad87953a13" Oct 11 04:51:24 crc kubenswrapper[4798]: I1011 04:51:24.092897 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1a13d27329532c8a289c88b941b95efb05679eb0f2793b004d0c7ad87953a13"} err="failed to get container status \"f1a13d27329532c8a289c88b941b95efb05679eb0f2793b004d0c7ad87953a13\": rpc error: code = NotFound desc = could not find container 
\"f1a13d27329532c8a289c88b941b95efb05679eb0f2793b004d0c7ad87953a13\": container with ID starting with f1a13d27329532c8a289c88b941b95efb05679eb0f2793b004d0c7ad87953a13 not found: ID does not exist" Oct 11 04:51:24 crc kubenswrapper[4798]: I1011 04:51:24.093146 4798 scope.go:117] "RemoveContainer" containerID="fd99212e831c35561d84f91e14f12482db1035bc8a48e2d8164c564ad674e6ac" Oct 11 04:51:24 crc kubenswrapper[4798]: E1011 04:51:24.093825 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd99212e831c35561d84f91e14f12482db1035bc8a48e2d8164c564ad674e6ac\": container with ID starting with fd99212e831c35561d84f91e14f12482db1035bc8a48e2d8164c564ad674e6ac not found: ID does not exist" containerID="fd99212e831c35561d84f91e14f12482db1035bc8a48e2d8164c564ad674e6ac" Oct 11 04:51:24 crc kubenswrapper[4798]: I1011 04:51:24.093851 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd99212e831c35561d84f91e14f12482db1035bc8a48e2d8164c564ad674e6ac"} err="failed to get container status \"fd99212e831c35561d84f91e14f12482db1035bc8a48e2d8164c564ad674e6ac\": rpc error: code = NotFound desc = could not find container \"fd99212e831c35561d84f91e14f12482db1035bc8a48e2d8164c564ad674e6ac\": container with ID starting with fd99212e831c35561d84f91e14f12482db1035bc8a48e2d8164c564ad674e6ac not found: ID does not exist" Oct 11 04:51:25 crc kubenswrapper[4798]: I1011 04:51:25.452334 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27f9ad6b-0a6a-4e43-b157-840e1ac08b96" path="/var/lib/kubelet/pods/27f9ad6b-0a6a-4e43-b157-840e1ac08b96/volumes" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.660540 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mkzcg"] Oct 11 04:52:03 crc kubenswrapper[4798]: E1011 04:52:03.661879 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27f9ad6b-0a6a-4e43-b157-840e1ac08b96" containerName="extract-utilities" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.661894 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="27f9ad6b-0a6a-4e43-b157-840e1ac08b96" containerName="extract-utilities" Oct 11 04:52:03 crc kubenswrapper[4798]: E1011 04:52:03.661921 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27f9ad6b-0a6a-4e43-b157-840e1ac08b96" containerName="registry-server" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.661929 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="27f9ad6b-0a6a-4e43-b157-840e1ac08b96" containerName="registry-server" Oct 11 04:52:03 crc kubenswrapper[4798]: E1011 04:52:03.661946 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27f9ad6b-0a6a-4e43-b157-840e1ac08b96" containerName="extract-content" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.661952 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="27f9ad6b-0a6a-4e43-b157-840e1ac08b96" containerName="extract-content" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.662119 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="27f9ad6b-0a6a-4e43-b157-840e1ac08b96" containerName="registry-server" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.666725 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.690447 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mkzcg"] Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.783284 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/513ca0de-9151-46b4-be48-90a9da71ea19-catalog-content\") pod \"certified-operators-mkzcg\" (UID: \"513ca0de-9151-46b4-be48-90a9da71ea19\") " pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.783380 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/513ca0de-9151-46b4-be48-90a9da71ea19-utilities\") pod \"certified-operators-mkzcg\" (UID: \"513ca0de-9151-46b4-be48-90a9da71ea19\") " pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.783454 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ng5j4\" (UniqueName: \"kubernetes.io/projected/513ca0de-9151-46b4-be48-90a9da71ea19-kube-api-access-ng5j4\") pod \"certified-operators-mkzcg\" (UID: \"513ca0de-9151-46b4-be48-90a9da71ea19\") " pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.885777 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/513ca0de-9151-46b4-be48-90a9da71ea19-catalog-content\") pod \"certified-operators-mkzcg\" (UID: \"513ca0de-9151-46b4-be48-90a9da71ea19\") " pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.885869 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/513ca0de-9151-46b4-be48-90a9da71ea19-utilities\") pod \"certified-operators-mkzcg\" (UID: \"513ca0de-9151-46b4-be48-90a9da71ea19\") " pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.885924 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ng5j4\" (UniqueName: \"kubernetes.io/projected/513ca0de-9151-46b4-be48-90a9da71ea19-kube-api-access-ng5j4\") pod \"certified-operators-mkzcg\" (UID: \"513ca0de-9151-46b4-be48-90a9da71ea19\") " pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.886306 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/513ca0de-9151-46b4-be48-90a9da71ea19-catalog-content\") pod \"certified-operators-mkzcg\" (UID: \"513ca0de-9151-46b4-be48-90a9da71ea19\") " pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.886496 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/513ca0de-9151-46b4-be48-90a9da71ea19-utilities\") pod \"certified-operators-mkzcg\" (UID: \"513ca0de-9151-46b4-be48-90a9da71ea19\") " pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:03 crc kubenswrapper[4798]: I1011 04:52:03.910760 4798 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ng5j4\" (UniqueName: \"kubernetes.io/projected/513ca0de-9151-46b4-be48-90a9da71ea19-kube-api-access-ng5j4\") pod \"certified-operators-mkzcg\" (UID: \"513ca0de-9151-46b4-be48-90a9da71ea19\") " pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:04 crc kubenswrapper[4798]: I1011 04:52:04.000468 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:04 crc kubenswrapper[4798]: I1011 04:52:04.594054 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mkzcg"] Oct 11 04:52:05 crc kubenswrapper[4798]: I1011 04:52:05.435805 4798 generic.go:334] "Generic (PLEG): container finished" podID="513ca0de-9151-46b4-be48-90a9da71ea19" containerID="ff02cd330d4460fe002da041ae1ca7df4991c85d2ce3970c38587327a665af80" exitCode=0 Oct 11 04:52:05 crc kubenswrapper[4798]: I1011 04:52:05.448207 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mkzcg" event={"ID":"513ca0de-9151-46b4-be48-90a9da71ea19","Type":"ContainerDied","Data":"ff02cd330d4460fe002da041ae1ca7df4991c85d2ce3970c38587327a665af80"} Oct 11 04:52:05 crc kubenswrapper[4798]: I1011 04:52:05.448376 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mkzcg" event={"ID":"513ca0de-9151-46b4-be48-90a9da71ea19","Type":"ContainerStarted","Data":"3b70d32d403cb7e4af481cda5b90954b7a94e3e7a715a5cc5b7f2ac623f8078c"} Oct 11 04:52:05 crc kubenswrapper[4798]: I1011 04:52:05.457474 4798 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 04:52:07 crc kubenswrapper[4798]: I1011 04:52:07.471079 4798 generic.go:334] "Generic (PLEG): container finished" podID="513ca0de-9151-46b4-be48-90a9da71ea19" containerID="00c12ad05a1573ee80157dc31ca8a685483fe08e9caddd6ecfa54192f0751502" exitCode=0 Oct 11 04:52:07 crc kubenswrapper[4798]: I1011 04:52:07.471775 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mkzcg" event={"ID":"513ca0de-9151-46b4-be48-90a9da71ea19","Type":"ContainerDied","Data":"00c12ad05a1573ee80157dc31ca8a685483fe08e9caddd6ecfa54192f0751502"} Oct 11 04:52:08 crc kubenswrapper[4798]: I1011 04:52:08.494589 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mkzcg" event={"ID":"513ca0de-9151-46b4-be48-90a9da71ea19","Type":"ContainerStarted","Data":"f0cdde051c3355326fdfb0ecfe1b108c9c95a886f98d375f87436a99509bfa36"} Oct 11 04:52:08 crc kubenswrapper[4798]: I1011 04:52:08.538662 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mkzcg" podStartSLOduration=3.081797458 podStartE2EDuration="5.538640543s" podCreationTimestamp="2025-10-11 04:52:03 +0000 UTC" firstStartedPulling="2025-10-11 04:52:05.451750533 +0000 UTC m=+3420.788040259" lastFinishedPulling="2025-10-11 04:52:07.908593618 +0000 UTC m=+3423.244883344" observedRunningTime="2025-10-11 04:52:08.52999538 +0000 UTC m=+3423.866285066" watchObservedRunningTime="2025-10-11 04:52:08.538640543 +0000 UTC m=+3423.874930219" Oct 11 04:52:14 crc kubenswrapper[4798]: I1011 04:52:14.001070 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:14 crc kubenswrapper[4798]: I1011 04:52:14.001787 4798 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:14 crc kubenswrapper[4798]: I1011 04:52:14.095006 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:14 crc kubenswrapper[4798]: I1011 04:52:14.663321 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:14 crc kubenswrapper[4798]: I1011 04:52:14.742870 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mkzcg"] Oct 11 04:52:16 crc kubenswrapper[4798]: I1011 04:52:16.594847 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mkzcg" podUID="513ca0de-9151-46b4-be48-90a9da71ea19" containerName="registry-server" containerID="cri-o://f0cdde051c3355326fdfb0ecfe1b108c9c95a886f98d375f87436a99509bfa36" gracePeriod=2 Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.067956 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2tp6k"] Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.072805 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.089108 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2tp6k"] Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.146451 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.252632 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ng5j4\" (UniqueName: \"kubernetes.io/projected/513ca0de-9151-46b4-be48-90a9da71ea19-kube-api-access-ng5j4\") pod \"513ca0de-9151-46b4-be48-90a9da71ea19\" (UID: \"513ca0de-9151-46b4-be48-90a9da71ea19\") " Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.252692 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/513ca0de-9151-46b4-be48-90a9da71ea19-utilities\") pod \"513ca0de-9151-46b4-be48-90a9da71ea19\" (UID: \"513ca0de-9151-46b4-be48-90a9da71ea19\") " Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.252968 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/513ca0de-9151-46b4-be48-90a9da71ea19-catalog-content\") pod \"513ca0de-9151-46b4-be48-90a9da71ea19\" (UID: \"513ca0de-9151-46b4-be48-90a9da71ea19\") " Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.253264 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c41f3f6-49b4-484c-9363-a21ab01c5967-catalog-content\") pod \"community-operators-2tp6k\" (UID: \"9c41f3f6-49b4-484c-9363-a21ab01c5967\") " pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.253472 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c41f3f6-49b4-484c-9363-a21ab01c5967-utilities\") pod \"community-operators-2tp6k\" (UID: 
\"9c41f3f6-49b4-484c-9363-a21ab01c5967\") " pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.253502 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97b7r\" (UniqueName: \"kubernetes.io/projected/9c41f3f6-49b4-484c-9363-a21ab01c5967-kube-api-access-97b7r\") pod \"community-operators-2tp6k\" (UID: \"9c41f3f6-49b4-484c-9363-a21ab01c5967\") " pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.254338 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/513ca0de-9151-46b4-be48-90a9da71ea19-utilities" (OuterVolumeSpecName: "utilities") pod "513ca0de-9151-46b4-be48-90a9da71ea19" (UID: "513ca0de-9151-46b4-be48-90a9da71ea19"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.278854 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/513ca0de-9151-46b4-be48-90a9da71ea19-kube-api-access-ng5j4" (OuterVolumeSpecName: "kube-api-access-ng5j4") pod "513ca0de-9151-46b4-be48-90a9da71ea19" (UID: "513ca0de-9151-46b4-be48-90a9da71ea19"). InnerVolumeSpecName "kube-api-access-ng5j4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.297625 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/513ca0de-9151-46b4-be48-90a9da71ea19-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "513ca0de-9151-46b4-be48-90a9da71ea19" (UID: "513ca0de-9151-46b4-be48-90a9da71ea19"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.355965 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c41f3f6-49b4-484c-9363-a21ab01c5967-utilities\") pod \"community-operators-2tp6k\" (UID: \"9c41f3f6-49b4-484c-9363-a21ab01c5967\") " pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.356020 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97b7r\" (UniqueName: \"kubernetes.io/projected/9c41f3f6-49b4-484c-9363-a21ab01c5967-kube-api-access-97b7r\") pod \"community-operators-2tp6k\" (UID: \"9c41f3f6-49b4-484c-9363-a21ab01c5967\") " pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.356068 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c41f3f6-49b4-484c-9363-a21ab01c5967-catalog-content\") pod \"community-operators-2tp6k\" (UID: \"9c41f3f6-49b4-484c-9363-a21ab01c5967\") " pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.356159 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ng5j4\" (UniqueName: \"kubernetes.io/projected/513ca0de-9151-46b4-be48-90a9da71ea19-kube-api-access-ng5j4\") on node \"crc\" DevicePath \"\"" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.356174 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/513ca0de-9151-46b4-be48-90a9da71ea19-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.356184 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/513ca0de-9151-46b4-be48-90a9da71ea19-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.357369 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c41f3f6-49b4-484c-9363-a21ab01c5967-catalog-content\") pod \"community-operators-2tp6k\" (UID: \"9c41f3f6-49b4-484c-9363-a21ab01c5967\") " pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.359914 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c41f3f6-49b4-484c-9363-a21ab01c5967-utilities\") pod \"community-operators-2tp6k\" (UID: \"9c41f3f6-49b4-484c-9363-a21ab01c5967\") " pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.381097 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97b7r\" (UniqueName: \"kubernetes.io/projected/9c41f3f6-49b4-484c-9363-a21ab01c5967-kube-api-access-97b7r\") pod \"community-operators-2tp6k\" (UID: \"9c41f3f6-49b4-484c-9363-a21ab01c5967\") " pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.464052 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.620279 4798 generic.go:334] "Generic (PLEG): container finished" podID="513ca0de-9151-46b4-be48-90a9da71ea19" containerID="f0cdde051c3355326fdfb0ecfe1b108c9c95a886f98d375f87436a99509bfa36" exitCode=0 Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.620677 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mkzcg" event={"ID":"513ca0de-9151-46b4-be48-90a9da71ea19","Type":"ContainerDied","Data":"f0cdde051c3355326fdfb0ecfe1b108c9c95a886f98d375f87436a99509bfa36"} Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.620718 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mkzcg" event={"ID":"513ca0de-9151-46b4-be48-90a9da71ea19","Type":"ContainerDied","Data":"3b70d32d403cb7e4af481cda5b90954b7a94e3e7a715a5cc5b7f2ac623f8078c"} Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.620736 4798 scope.go:117] "RemoveContainer" containerID="f0cdde051c3355326fdfb0ecfe1b108c9c95a886f98d375f87436a99509bfa36" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.620921 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mkzcg" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.669955 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mkzcg"] Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.683664 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mkzcg"] Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.703674 4798 scope.go:117] "RemoveContainer" containerID="00c12ad05a1573ee80157dc31ca8a685483fe08e9caddd6ecfa54192f0751502" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.775426 4798 scope.go:117] "RemoveContainer" containerID="ff02cd330d4460fe002da041ae1ca7df4991c85d2ce3970c38587327a665af80" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.833901 4798 scope.go:117] "RemoveContainer" containerID="f0cdde051c3355326fdfb0ecfe1b108c9c95a886f98d375f87436a99509bfa36" Oct 11 04:52:17 crc kubenswrapper[4798]: E1011 04:52:17.834453 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0cdde051c3355326fdfb0ecfe1b108c9c95a886f98d375f87436a99509bfa36\": container with ID starting with f0cdde051c3355326fdfb0ecfe1b108c9c95a886f98d375f87436a99509bfa36 not found: ID does not exist" containerID="f0cdde051c3355326fdfb0ecfe1b108c9c95a886f98d375f87436a99509bfa36" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.834492 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0cdde051c3355326fdfb0ecfe1b108c9c95a886f98d375f87436a99509bfa36"} err="failed to get container status \"f0cdde051c3355326fdfb0ecfe1b108c9c95a886f98d375f87436a99509bfa36\": rpc error: code = NotFound desc = could not find container \"f0cdde051c3355326fdfb0ecfe1b108c9c95a886f98d375f87436a99509bfa36\": container with ID starting with f0cdde051c3355326fdfb0ecfe1b108c9c95a886f98d375f87436a99509bfa36 not found: ID does not exist" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.834513 4798 scope.go:117] "RemoveContainer" containerID="00c12ad05a1573ee80157dc31ca8a685483fe08e9caddd6ecfa54192f0751502" Oct 11 04:52:17 crc kubenswrapper[4798]: E1011 04:52:17.835870 4798 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00c12ad05a1573ee80157dc31ca8a685483fe08e9caddd6ecfa54192f0751502\": container with ID starting with 00c12ad05a1573ee80157dc31ca8a685483fe08e9caddd6ecfa54192f0751502 not found: ID does not exist" containerID="00c12ad05a1573ee80157dc31ca8a685483fe08e9caddd6ecfa54192f0751502" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.835892 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00c12ad05a1573ee80157dc31ca8a685483fe08e9caddd6ecfa54192f0751502"} err="failed to get container status \"00c12ad05a1573ee80157dc31ca8a685483fe08e9caddd6ecfa54192f0751502\": rpc error: code = NotFound desc = could not find container \"00c12ad05a1573ee80157dc31ca8a685483fe08e9caddd6ecfa54192f0751502\": container with ID starting with 00c12ad05a1573ee80157dc31ca8a685483fe08e9caddd6ecfa54192f0751502 not found: ID does not exist" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.835904 4798 scope.go:117] "RemoveContainer" containerID="ff02cd330d4460fe002da041ae1ca7df4991c85d2ce3970c38587327a665af80" Oct 11 04:52:17 crc kubenswrapper[4798]: E1011 04:52:17.836101 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff02cd330d4460fe002da041ae1ca7df4991c85d2ce3970c38587327a665af80\": container with ID starting with ff02cd330d4460fe002da041ae1ca7df4991c85d2ce3970c38587327a665af80 not found: ID does not exist" containerID="ff02cd330d4460fe002da041ae1ca7df4991c85d2ce3970c38587327a665af80" Oct 11 04:52:17 crc kubenswrapper[4798]: I1011 04:52:17.836119 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff02cd330d4460fe002da041ae1ca7df4991c85d2ce3970c38587327a665af80"} err="failed to get container status \"ff02cd330d4460fe002da041ae1ca7df4991c85d2ce3970c38587327a665af80\": rpc error: code = NotFound desc = could not find container \"ff02cd330d4460fe002da041ae1ca7df4991c85d2ce3970c38587327a665af80\": container with ID starting with ff02cd330d4460fe002da041ae1ca7df4991c85d2ce3970c38587327a665af80 not found: ID does not exist" Oct 11 04:52:18 crc kubenswrapper[4798]: I1011 04:52:18.052227 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2tp6k"] Oct 11 04:52:18 crc kubenswrapper[4798]: I1011 04:52:18.637049 4798 generic.go:334] "Generic (PLEG): container finished" podID="9c41f3f6-49b4-484c-9363-a21ab01c5967" containerID="bb3e14052a2c51a3f05b3099627d0b3d0fe2755d7ab049f3b2c16fae207c1ec0" exitCode=0 Oct 11 04:52:18 crc kubenswrapper[4798]: I1011 04:52:18.637117 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2tp6k" event={"ID":"9c41f3f6-49b4-484c-9363-a21ab01c5967","Type":"ContainerDied","Data":"bb3e14052a2c51a3f05b3099627d0b3d0fe2755d7ab049f3b2c16fae207c1ec0"} Oct 11 04:52:18 crc kubenswrapper[4798]: I1011 04:52:18.637530 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2tp6k" event={"ID":"9c41f3f6-49b4-484c-9363-a21ab01c5967","Type":"ContainerStarted","Data":"cd0e4fc1135720c267af067b379f26fdbb34b8695896620a378a81a98b46154d"} Oct 11 04:52:19 crc kubenswrapper[4798]: I1011 04:52:19.441275 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="513ca0de-9151-46b4-be48-90a9da71ea19" path="/var/lib/kubelet/pods/513ca0de-9151-46b4-be48-90a9da71ea19/volumes" Oct 11 
04:52:19 crc kubenswrapper[4798]: I1011 04:52:19.667736 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2tp6k" event={"ID":"9c41f3f6-49b4-484c-9363-a21ab01c5967","Type":"ContainerStarted","Data":"23d5fca149cb2e708c3d5eb6a42b9def561c7df36513ed1899949ef2c02ac599"} Oct 11 04:52:20 crc kubenswrapper[4798]: I1011 04:52:20.681553 4798 generic.go:334] "Generic (PLEG): container finished" podID="9c41f3f6-49b4-484c-9363-a21ab01c5967" containerID="23d5fca149cb2e708c3d5eb6a42b9def561c7df36513ed1899949ef2c02ac599" exitCode=0 Oct 11 04:52:20 crc kubenswrapper[4798]: I1011 04:52:20.682175 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2tp6k" event={"ID":"9c41f3f6-49b4-484c-9363-a21ab01c5967","Type":"ContainerDied","Data":"23d5fca149cb2e708c3d5eb6a42b9def561c7df36513ed1899949ef2c02ac599"} Oct 11 04:52:21 crc kubenswrapper[4798]: I1011 04:52:21.705841 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2tp6k" event={"ID":"9c41f3f6-49b4-484c-9363-a21ab01c5967","Type":"ContainerStarted","Data":"ce445eabe4b57d7db3ab94a7024959d6c821ba5e021608a4c742856f5296427b"} Oct 11 04:52:21 crc kubenswrapper[4798]: I1011 04:52:21.744984 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2tp6k" podStartSLOduration=2.267067043 podStartE2EDuration="4.744963093s" podCreationTimestamp="2025-10-11 04:52:17 +0000 UTC" firstStartedPulling="2025-10-11 04:52:18.640198874 +0000 UTC m=+3433.976488570" lastFinishedPulling="2025-10-11 04:52:21.118094924 +0000 UTC m=+3436.454384620" observedRunningTime="2025-10-11 04:52:21.742887915 +0000 UTC m=+3437.079177611" watchObservedRunningTime="2025-10-11 04:52:21.744963093 +0000 UTC m=+3437.081252779" Oct 11 04:52:27 crc kubenswrapper[4798]: I1011 04:52:27.464196 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:27 crc kubenswrapper[4798]: I1011 04:52:27.465063 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:27 crc kubenswrapper[4798]: I1011 04:52:27.540096 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:27 crc kubenswrapper[4798]: I1011 04:52:27.867193 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:27 crc kubenswrapper[4798]: I1011 04:52:27.946145 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2tp6k"] Oct 11 04:52:29 crc kubenswrapper[4798]: I1011 04:52:29.793894 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2tp6k" podUID="9c41f3f6-49b4-484c-9363-a21ab01c5967" containerName="registry-server" containerID="cri-o://ce445eabe4b57d7db3ab94a7024959d6c821ba5e021608a4c742856f5296427b" gracePeriod=2 Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.372933 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.417672 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-97b7r\" (UniqueName: \"kubernetes.io/projected/9c41f3f6-49b4-484c-9363-a21ab01c5967-kube-api-access-97b7r\") pod \"9c41f3f6-49b4-484c-9363-a21ab01c5967\" (UID: \"9c41f3f6-49b4-484c-9363-a21ab01c5967\") " Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.417744 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c41f3f6-49b4-484c-9363-a21ab01c5967-utilities\") pod \"9c41f3f6-49b4-484c-9363-a21ab01c5967\" (UID: \"9c41f3f6-49b4-484c-9363-a21ab01c5967\") " Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.417877 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c41f3f6-49b4-484c-9363-a21ab01c5967-catalog-content\") pod \"9c41f3f6-49b4-484c-9363-a21ab01c5967\" (UID: \"9c41f3f6-49b4-484c-9363-a21ab01c5967\") " Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.418881 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c41f3f6-49b4-484c-9363-a21ab01c5967-utilities" (OuterVolumeSpecName: "utilities") pod "9c41f3f6-49b4-484c-9363-a21ab01c5967" (UID: "9c41f3f6-49b4-484c-9363-a21ab01c5967"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.428975 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c41f3f6-49b4-484c-9363-a21ab01c5967-kube-api-access-97b7r" (OuterVolumeSpecName: "kube-api-access-97b7r") pod "9c41f3f6-49b4-484c-9363-a21ab01c5967" (UID: "9c41f3f6-49b4-484c-9363-a21ab01c5967"). InnerVolumeSpecName "kube-api-access-97b7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.464719 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c41f3f6-49b4-484c-9363-a21ab01c5967-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9c41f3f6-49b4-484c-9363-a21ab01c5967" (UID: "9c41f3f6-49b4-484c-9363-a21ab01c5967"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.520534 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c41f3f6-49b4-484c-9363-a21ab01c5967-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.520579 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-97b7r\" (UniqueName: \"kubernetes.io/projected/9c41f3f6-49b4-484c-9363-a21ab01c5967-kube-api-access-97b7r\") on node \"crc\" DevicePath \"\"" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.520593 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c41f3f6-49b4-484c-9363-a21ab01c5967-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.813797 4798 generic.go:334] "Generic (PLEG): container finished" podID="9c41f3f6-49b4-484c-9363-a21ab01c5967" containerID="ce445eabe4b57d7db3ab94a7024959d6c821ba5e021608a4c742856f5296427b" exitCode=0 Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.813859 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2tp6k" event={"ID":"9c41f3f6-49b4-484c-9363-a21ab01c5967","Type":"ContainerDied","Data":"ce445eabe4b57d7db3ab94a7024959d6c821ba5e021608a4c742856f5296427b"} Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.813896 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2tp6k" event={"ID":"9c41f3f6-49b4-484c-9363-a21ab01c5967","Type":"ContainerDied","Data":"cd0e4fc1135720c267af067b379f26fdbb34b8695896620a378a81a98b46154d"} Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.813921 4798 scope.go:117] "RemoveContainer" containerID="ce445eabe4b57d7db3ab94a7024959d6c821ba5e021608a4c742856f5296427b" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.814100 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2tp6k" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.858048 4798 scope.go:117] "RemoveContainer" containerID="23d5fca149cb2e708c3d5eb6a42b9def561c7df36513ed1899949ef2c02ac599" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.866136 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2tp6k"] Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.888250 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2tp6k"] Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.889925 4798 scope.go:117] "RemoveContainer" containerID="bb3e14052a2c51a3f05b3099627d0b3d0fe2755d7ab049f3b2c16fae207c1ec0" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.934644 4798 scope.go:117] "RemoveContainer" containerID="ce445eabe4b57d7db3ab94a7024959d6c821ba5e021608a4c742856f5296427b" Oct 11 04:52:30 crc kubenswrapper[4798]: E1011 04:52:30.935121 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce445eabe4b57d7db3ab94a7024959d6c821ba5e021608a4c742856f5296427b\": container with ID starting with ce445eabe4b57d7db3ab94a7024959d6c821ba5e021608a4c742856f5296427b not found: ID does not exist" containerID="ce445eabe4b57d7db3ab94a7024959d6c821ba5e021608a4c742856f5296427b" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.935172 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce445eabe4b57d7db3ab94a7024959d6c821ba5e021608a4c742856f5296427b"} err="failed to get container status \"ce445eabe4b57d7db3ab94a7024959d6c821ba5e021608a4c742856f5296427b\": rpc error: code = NotFound desc = could not find container \"ce445eabe4b57d7db3ab94a7024959d6c821ba5e021608a4c742856f5296427b\": container with ID starting with ce445eabe4b57d7db3ab94a7024959d6c821ba5e021608a4c742856f5296427b not found: ID does not exist" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.935204 4798 scope.go:117] "RemoveContainer" containerID="23d5fca149cb2e708c3d5eb6a42b9def561c7df36513ed1899949ef2c02ac599" Oct 11 04:52:30 crc kubenswrapper[4798]: E1011 04:52:30.935661 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23d5fca149cb2e708c3d5eb6a42b9def561c7df36513ed1899949ef2c02ac599\": container with ID starting with 23d5fca149cb2e708c3d5eb6a42b9def561c7df36513ed1899949ef2c02ac599 not found: ID does not exist" containerID="23d5fca149cb2e708c3d5eb6a42b9def561c7df36513ed1899949ef2c02ac599" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.935708 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23d5fca149cb2e708c3d5eb6a42b9def561c7df36513ed1899949ef2c02ac599"} err="failed to get container status \"23d5fca149cb2e708c3d5eb6a42b9def561c7df36513ed1899949ef2c02ac599\": rpc error: code = NotFound desc = could not find container \"23d5fca149cb2e708c3d5eb6a42b9def561c7df36513ed1899949ef2c02ac599\": container with ID starting with 23d5fca149cb2e708c3d5eb6a42b9def561c7df36513ed1899949ef2c02ac599 not found: ID does not exist" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.935739 4798 scope.go:117] "RemoveContainer" containerID="bb3e14052a2c51a3f05b3099627d0b3d0fe2755d7ab049f3b2c16fae207c1ec0" Oct 11 04:52:30 crc kubenswrapper[4798]: E1011 04:52:30.936286 4798 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"bb3e14052a2c51a3f05b3099627d0b3d0fe2755d7ab049f3b2c16fae207c1ec0\": container with ID starting with bb3e14052a2c51a3f05b3099627d0b3d0fe2755d7ab049f3b2c16fae207c1ec0 not found: ID does not exist" containerID="bb3e14052a2c51a3f05b3099627d0b3d0fe2755d7ab049f3b2c16fae207c1ec0" Oct 11 04:52:30 crc kubenswrapper[4798]: I1011 04:52:30.936321 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb3e14052a2c51a3f05b3099627d0b3d0fe2755d7ab049f3b2c16fae207c1ec0"} err="failed to get container status \"bb3e14052a2c51a3f05b3099627d0b3d0fe2755d7ab049f3b2c16fae207c1ec0\": rpc error: code = NotFound desc = could not find container \"bb3e14052a2c51a3f05b3099627d0b3d0fe2755d7ab049f3b2c16fae207c1ec0\": container with ID starting with bb3e14052a2c51a3f05b3099627d0b3d0fe2755d7ab049f3b2c16fae207c1ec0 not found: ID does not exist" Oct 11 04:52:31 crc kubenswrapper[4798]: I1011 04:52:31.454491 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c41f3f6-49b4-484c-9363-a21ab01c5967" path="/var/lib/kubelet/pods/9c41f3f6-49b4-484c-9363-a21ab01c5967/volumes" Oct 11 04:52:57 crc kubenswrapper[4798]: I1011 04:52:57.138632 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 04:52:57 crc kubenswrapper[4798]: I1011 04:52:57.139676 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.802166 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-km6j5"] Oct 11 04:53:07 crc kubenswrapper[4798]: E1011 04:53:07.803255 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="513ca0de-9151-46b4-be48-90a9da71ea19" containerName="registry-server" Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.803270 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="513ca0de-9151-46b4-be48-90a9da71ea19" containerName="registry-server" Oct 11 04:53:07 crc kubenswrapper[4798]: E1011 04:53:07.803287 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="513ca0de-9151-46b4-be48-90a9da71ea19" containerName="extract-utilities" Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.803294 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="513ca0de-9151-46b4-be48-90a9da71ea19" containerName="extract-utilities" Oct 11 04:53:07 crc kubenswrapper[4798]: E1011 04:53:07.803306 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c41f3f6-49b4-484c-9363-a21ab01c5967" containerName="extract-utilities" Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.803312 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c41f3f6-49b4-484c-9363-a21ab01c5967" containerName="extract-utilities" Oct 11 04:53:07 crc kubenswrapper[4798]: E1011 04:53:07.803325 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c41f3f6-49b4-484c-9363-a21ab01c5967" containerName="extract-content" Oct 11 04:53:07 crc kubenswrapper[4798]: 
Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.803332 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c41f3f6-49b4-484c-9363-a21ab01c5967" containerName="extract-content"
Oct 11 04:53:07 crc kubenswrapper[4798]: E1011 04:53:07.803341 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c41f3f6-49b4-484c-9363-a21ab01c5967" containerName="registry-server"
Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.803347 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c41f3f6-49b4-484c-9363-a21ab01c5967" containerName="registry-server"
Oct 11 04:53:07 crc kubenswrapper[4798]: E1011 04:53:07.803384 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="513ca0de-9151-46b4-be48-90a9da71ea19" containerName="extract-content"
Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.803409 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="513ca0de-9151-46b4-be48-90a9da71ea19" containerName="extract-content"
Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.803581 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c41f3f6-49b4-484c-9363-a21ab01c5967" containerName="registry-server"
Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.803601 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="513ca0de-9151-46b4-be48-90a9da71ea19" containerName="registry-server"
Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.805141 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.828809 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-km6j5"]
Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.970793 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vkww\" (UniqueName: \"kubernetes.io/projected/3938fb90-a727-4764-addb-42aa76b61bab-kube-api-access-4vkww\") pod \"redhat-marketplace-km6j5\" (UID: \"3938fb90-a727-4764-addb-42aa76b61bab\") " pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.970901 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3938fb90-a727-4764-addb-42aa76b61bab-utilities\") pod \"redhat-marketplace-km6j5\" (UID: \"3938fb90-a727-4764-addb-42aa76b61bab\") " pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:07 crc kubenswrapper[4798]: I1011 04:53:07.970970 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3938fb90-a727-4764-addb-42aa76b61bab-catalog-content\") pod \"redhat-marketplace-km6j5\" (UID: \"3938fb90-a727-4764-addb-42aa76b61bab\") " pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:08 crc kubenswrapper[4798]: I1011 04:53:08.073944 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vkww\" (UniqueName: \"kubernetes.io/projected/3938fb90-a727-4764-addb-42aa76b61bab-kube-api-access-4vkww\") pod \"redhat-marketplace-km6j5\" (UID: \"3938fb90-a727-4764-addb-42aa76b61bab\") " pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:08 crc kubenswrapper[4798]: I1011 04:53:08.074032 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3938fb90-a727-4764-addb-42aa76b61bab-utilities\") pod \"redhat-marketplace-km6j5\" (UID: \"3938fb90-a727-4764-addb-42aa76b61bab\") " pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:08 crc kubenswrapper[4798]: I1011 04:53:08.074081 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3938fb90-a727-4764-addb-42aa76b61bab-catalog-content\") pod \"redhat-marketplace-km6j5\" (UID: \"3938fb90-a727-4764-addb-42aa76b61bab\") " pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:08 crc kubenswrapper[4798]: I1011 04:53:08.074993 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3938fb90-a727-4764-addb-42aa76b61bab-utilities\") pod \"redhat-marketplace-km6j5\" (UID: \"3938fb90-a727-4764-addb-42aa76b61bab\") " pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:08 crc kubenswrapper[4798]: I1011 04:53:08.075143 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3938fb90-a727-4764-addb-42aa76b61bab-catalog-content\") pod \"redhat-marketplace-km6j5\" (UID: \"3938fb90-a727-4764-addb-42aa76b61bab\") " pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:08 crc kubenswrapper[4798]: I1011 04:53:08.099952 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vkww\" (UniqueName: \"kubernetes.io/projected/3938fb90-a727-4764-addb-42aa76b61bab-kube-api-access-4vkww\") pod \"redhat-marketplace-km6j5\" (UID: \"3938fb90-a727-4764-addb-42aa76b61bab\") " pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:08 crc kubenswrapper[4798]: I1011 04:53:08.139106 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:08 crc kubenswrapper[4798]: I1011 04:53:08.690312 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-km6j5"]
Oct 11 04:53:08 crc kubenswrapper[4798]: W1011 04:53:08.692769 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3938fb90_a727_4764_addb_42aa76b61bab.slice/crio-b5825cc27d7c613284d4d7c3714c0017aa9c7989e3e398efd77380e5bf5ccb9d WatchSource:0}: Error finding container b5825cc27d7c613284d4d7c3714c0017aa9c7989e3e398efd77380e5bf5ccb9d: Status 404 returned error can't find the container with id b5825cc27d7c613284d4d7c3714c0017aa9c7989e3e398efd77380e5bf5ccb9d
Oct 11 04:53:09 crc kubenswrapper[4798]: I1011 04:53:09.238099 4798 generic.go:334] "Generic (PLEG): container finished" podID="3938fb90-a727-4764-addb-42aa76b61bab" containerID="1c3785c0f1ff26f73cadaf27027c4b02020794d3336da77907f704c6239b5686" exitCode=0
Oct 11 04:53:09 crc kubenswrapper[4798]: I1011 04:53:09.238337 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-km6j5" event={"ID":"3938fb90-a727-4764-addb-42aa76b61bab","Type":"ContainerDied","Data":"1c3785c0f1ff26f73cadaf27027c4b02020794d3336da77907f704c6239b5686"}
Oct 11 04:53:09 crc kubenswrapper[4798]: I1011 04:53:09.238639 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-km6j5" event={"ID":"3938fb90-a727-4764-addb-42aa76b61bab","Type":"ContainerStarted","Data":"b5825cc27d7c613284d4d7c3714c0017aa9c7989e3e398efd77380e5bf5ccb9d"}
Oct 11 04:53:10 crc kubenswrapper[4798]: I1011 04:53:10.253807 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-km6j5" event={"ID":"3938fb90-a727-4764-addb-42aa76b61bab","Type":"ContainerStarted","Data":"05a4d7dacd7ec5a805a8d5c14a8ad2d2db90ecbe30606d9397acac5785c56f37"}
Oct 11 04:53:11 crc kubenswrapper[4798]: I1011 04:53:11.270355 4798 generic.go:334] "Generic (PLEG): container finished" podID="3938fb90-a727-4764-addb-42aa76b61bab" containerID="05a4d7dacd7ec5a805a8d5c14a8ad2d2db90ecbe30606d9397acac5785c56f37" exitCode=0
Oct 11 04:53:11 crc kubenswrapper[4798]: I1011 04:53:11.270447 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-km6j5" event={"ID":"3938fb90-a727-4764-addb-42aa76b61bab","Type":"ContainerDied","Data":"05a4d7dacd7ec5a805a8d5c14a8ad2d2db90ecbe30606d9397acac5785c56f37"}
Oct 11 04:53:12 crc kubenswrapper[4798]: I1011 04:53:12.284580 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-km6j5" event={"ID":"3938fb90-a727-4764-addb-42aa76b61bab","Type":"ContainerStarted","Data":"3419b2640e62a205376f4d17d32bba9c1c704ae34b22c8b50e09286d9b7749b1"}
Oct 11 04:53:12 crc kubenswrapper[4798]: I1011 04:53:12.313253 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-km6j5" podStartSLOduration=2.848725249 podStartE2EDuration="5.313232986s" podCreationTimestamp="2025-10-11 04:53:07 +0000 UTC" firstStartedPulling="2025-10-11 04:53:09.241187693 +0000 UTC m=+3484.577477389" lastFinishedPulling="2025-10-11 04:53:11.70569544 +0000 UTC m=+3487.041985126" observedRunningTime="2025-10-11 04:53:12.31170911 +0000 UTC m=+3487.647998796" watchObservedRunningTime="2025-10-11 04:53:12.313232986 +0000 UTC m=+3487.649522672"
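Note on the timing fields (not part of the recorded log): the "Observed pod startup duration" entry above embeds monotonic offsets ("m=+seconds"), so image pull time can be derived directly. For redhat-marketplace-km6j5, 3487.041985126 - 3484.577477389 is about 2.46s, which matches podStartE2EDuration minus podStartSLOduration (5.313s - 2.849s). A minimal sketch, assuming the capture is saved as kubelet.log:

import re

ENTRY = re.compile(
    r'"Observed pod startup duration" pod="([^"]+)".*?'
    r'firstStartedPulling="[^"]*m=\+([\d.]+)".*?'
    r'lastFinishedPulling="[^"]*m=\+([\d.]+)"')

with open("kubelet.log", encoding="utf-8", errors="replace") as f:
    for line in f:
        m = ENTRY.search(line)
        if m:
            pod, t0, t1 = m.group(1), float(m.group(2)), float(m.group(3))
            print(f"{pod}: pulled images in {t1 - t0:.2f}s")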
Oct 11 04:53:18 crc kubenswrapper[4798]: I1011 04:53:18.139518 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:18 crc kubenswrapper[4798]: I1011 04:53:18.140516 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:18 crc kubenswrapper[4798]: I1011 04:53:18.203882 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:18 crc kubenswrapper[4798]: I1011 04:53:18.416749 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:18 crc kubenswrapper[4798]: I1011 04:53:18.489384 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-km6j5"]
Oct 11 04:53:20 crc kubenswrapper[4798]: I1011 04:53:20.383303 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-km6j5" podUID="3938fb90-a727-4764-addb-42aa76b61bab" containerName="registry-server" containerID="cri-o://3419b2640e62a205376f4d17d32bba9c1c704ae34b22c8b50e09286d9b7749b1" gracePeriod=2
Oct 11 04:53:20 crc kubenswrapper[4798]: I1011 04:53:20.974025 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.041077 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3938fb90-a727-4764-addb-42aa76b61bab-utilities\") pod \"3938fb90-a727-4764-addb-42aa76b61bab\" (UID: \"3938fb90-a727-4764-addb-42aa76b61bab\") "
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.041248 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3938fb90-a727-4764-addb-42aa76b61bab-catalog-content\") pod \"3938fb90-a727-4764-addb-42aa76b61bab\" (UID: \"3938fb90-a727-4764-addb-42aa76b61bab\") "
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.041678 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vkww\" (UniqueName: \"kubernetes.io/projected/3938fb90-a727-4764-addb-42aa76b61bab-kube-api-access-4vkww\") pod \"3938fb90-a727-4764-addb-42aa76b61bab\" (UID: \"3938fb90-a727-4764-addb-42aa76b61bab\") "
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.042292 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3938fb90-a727-4764-addb-42aa76b61bab-utilities" (OuterVolumeSpecName: "utilities") pod "3938fb90-a727-4764-addb-42aa76b61bab" (UID: "3938fb90-a727-4764-addb-42aa76b61bab"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.043202 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3938fb90-a727-4764-addb-42aa76b61bab-utilities\") on node \"crc\" DevicePath \"\""
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.050359 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3938fb90-a727-4764-addb-42aa76b61bab-kube-api-access-4vkww" (OuterVolumeSpecName: "kube-api-access-4vkww") pod "3938fb90-a727-4764-addb-42aa76b61bab" (UID: "3938fb90-a727-4764-addb-42aa76b61bab"). InnerVolumeSpecName "kube-api-access-4vkww". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.056151 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3938fb90-a727-4764-addb-42aa76b61bab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3938fb90-a727-4764-addb-42aa76b61bab" (UID: "3938fb90-a727-4764-addb-42aa76b61bab"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.145022 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vkww\" (UniqueName: \"kubernetes.io/projected/3938fb90-a727-4764-addb-42aa76b61bab-kube-api-access-4vkww\") on node \"crc\" DevicePath \"\""
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.145059 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3938fb90-a727-4764-addb-42aa76b61bab-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.396101 4798 generic.go:334] "Generic (PLEG): container finished" podID="3938fb90-a727-4764-addb-42aa76b61bab" containerID="3419b2640e62a205376f4d17d32bba9c1c704ae34b22c8b50e09286d9b7749b1" exitCode=0
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.396148 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-km6j5" event={"ID":"3938fb90-a727-4764-addb-42aa76b61bab","Type":"ContainerDied","Data":"3419b2640e62a205376f4d17d32bba9c1c704ae34b22c8b50e09286d9b7749b1"}
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.396178 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-km6j5" event={"ID":"3938fb90-a727-4764-addb-42aa76b61bab","Type":"ContainerDied","Data":"b5825cc27d7c613284d4d7c3714c0017aa9c7989e3e398efd77380e5bf5ccb9d"}
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.396207 4798 scope.go:117] "RemoveContainer" containerID="3419b2640e62a205376f4d17d32bba9c1c704ae34b22c8b50e09286d9b7749b1"
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.396222 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-km6j5"
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.441492 4798 scope.go:117] "RemoveContainer" containerID="05a4d7dacd7ec5a805a8d5c14a8ad2d2db90ecbe30606d9397acac5785c56f37"
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.444906 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-km6j5"]
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.466190 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-km6j5"]
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.473642 4798 scope.go:117] "RemoveContainer" containerID="1c3785c0f1ff26f73cadaf27027c4b02020794d3336da77907f704c6239b5686"
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.519027 4798 scope.go:117] "RemoveContainer" containerID="3419b2640e62a205376f4d17d32bba9c1c704ae34b22c8b50e09286d9b7749b1"
Oct 11 04:53:21 crc kubenswrapper[4798]: E1011 04:53:21.519760 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3419b2640e62a205376f4d17d32bba9c1c704ae34b22c8b50e09286d9b7749b1\": container with ID starting with 3419b2640e62a205376f4d17d32bba9c1c704ae34b22c8b50e09286d9b7749b1 not found: ID does not exist" containerID="3419b2640e62a205376f4d17d32bba9c1c704ae34b22c8b50e09286d9b7749b1"
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.519804 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3419b2640e62a205376f4d17d32bba9c1c704ae34b22c8b50e09286d9b7749b1"} err="failed to get container status \"3419b2640e62a205376f4d17d32bba9c1c704ae34b22c8b50e09286d9b7749b1\": rpc error: code = NotFound desc = could not find container \"3419b2640e62a205376f4d17d32bba9c1c704ae34b22c8b50e09286d9b7749b1\": container with ID starting with 3419b2640e62a205376f4d17d32bba9c1c704ae34b22c8b50e09286d9b7749b1 not found: ID does not exist"
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.519829 4798 scope.go:117] "RemoveContainer" containerID="05a4d7dacd7ec5a805a8d5c14a8ad2d2db90ecbe30606d9397acac5785c56f37"
Oct 11 04:53:21 crc kubenswrapper[4798]: E1011 04:53:21.520321 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05a4d7dacd7ec5a805a8d5c14a8ad2d2db90ecbe30606d9397acac5785c56f37\": container with ID starting with 05a4d7dacd7ec5a805a8d5c14a8ad2d2db90ecbe30606d9397acac5785c56f37 not found: ID does not exist" containerID="05a4d7dacd7ec5a805a8d5c14a8ad2d2db90ecbe30606d9397acac5785c56f37"
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.520347 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05a4d7dacd7ec5a805a8d5c14a8ad2d2db90ecbe30606d9397acac5785c56f37"} err="failed to get container status \"05a4d7dacd7ec5a805a8d5c14a8ad2d2db90ecbe30606d9397acac5785c56f37\": rpc error: code = NotFound desc = could not find container \"05a4d7dacd7ec5a805a8d5c14a8ad2d2db90ecbe30606d9397acac5785c56f37\": container with ID starting with 05a4d7dacd7ec5a805a8d5c14a8ad2d2db90ecbe30606d9397acac5785c56f37 not found: ID does not exist"
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.520360 4798 scope.go:117] "RemoveContainer" containerID="1c3785c0f1ff26f73cadaf27027c4b02020794d3336da77907f704c6239b5686"
Oct 11 04:53:21 crc kubenswrapper[4798]: E1011 04:53:21.520717 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c3785c0f1ff26f73cadaf27027c4b02020794d3336da77907f704c6239b5686\": container with ID starting with 1c3785c0f1ff26f73cadaf27027c4b02020794d3336da77907f704c6239b5686 not found: ID does not exist" containerID="1c3785c0f1ff26f73cadaf27027c4b02020794d3336da77907f704c6239b5686"
Oct 11 04:53:21 crc kubenswrapper[4798]: I1011 04:53:21.520741 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c3785c0f1ff26f73cadaf27027c4b02020794d3336da77907f704c6239b5686"} err="failed to get container status \"1c3785c0f1ff26f73cadaf27027c4b02020794d3336da77907f704c6239b5686\": rpc error: code = NotFound desc = could not find container \"1c3785c0f1ff26f73cadaf27027c4b02020794d3336da77907f704c6239b5686\": container with ID starting with 1c3785c0f1ff26f73cadaf27027c4b02020794d3336da77907f704c6239b5686 not found: ID does not exist"
Oct 11 04:53:23 crc kubenswrapper[4798]: I1011 04:53:23.452863 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3938fb90-a727-4764-addb-42aa76b61bab" path="/var/lib/kubelet/pods/3938fb90-a727-4764-addb-42aa76b61bab/volumes"
Oct 11 04:53:23 crc kubenswrapper[4798]: I1011 04:53:23.625924 4798 scope.go:117] "RemoveContainer" containerID="9c63436abf654e2f49bdff0f01507463c397dc69a0159011b30d7e6837ab9c2a"
Oct 11 04:53:23 crc kubenswrapper[4798]: I1011 04:53:23.826299 4798 scope.go:117] "RemoveContainer" containerID="64bf0f6849af5de6416556618b579376ee499f79de5896582cca83d7a060a6cb"
Oct 11 04:53:27 crc kubenswrapper[4798]: I1011 04:53:27.138138 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 04:53:27 crc kubenswrapper[4798]: I1011 04:53:27.139151 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 04:53:57 crc kubenswrapper[4798]: I1011 04:53:57.139462 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 04:53:57 crc kubenswrapper[4798]: I1011 04:53:57.140375 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 04:53:57 crc kubenswrapper[4798]: I1011 04:53:57.140502 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2"
pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 04:53:57 crc kubenswrapper[4798]: I1011 04:53:57.141974 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d" gracePeriod=600 Oct 11 04:53:57 crc kubenswrapper[4798]: E1011 04:53:57.302579 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:53:57 crc kubenswrapper[4798]: I1011 04:53:57.902743 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d" exitCode=0 Oct 11 04:53:57 crc kubenswrapper[4798]: I1011 04:53:57.902973 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"} Oct 11 04:53:57 crc kubenswrapper[4798]: I1011 04:53:57.903379 4798 scope.go:117] "RemoveContainer" containerID="d5e0dca4104eeca7c9a76b11cb7ae2acd478e963ddf4e0a9647a5803b7ebfc90" Oct 11 04:53:57 crc kubenswrapper[4798]: I1011 04:53:57.904657 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d" Oct 11 04:53:57 crc kubenswrapper[4798]: E1011 04:53:57.905112 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:54:12 crc kubenswrapper[4798]: I1011 04:54:12.425084 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d" Oct 11 04:54:12 crc kubenswrapper[4798]: E1011 04:54:12.426076 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:54:14 crc kubenswrapper[4798]: I1011 04:54:14.111200 4798 generic.go:334] "Generic (PLEG): container finished" podID="b3b2c8d8-3245-48a3-be3a-099046cf7258" containerID="7f8f349b21d5c10e3bac64ece6b39de4df0bb89dc8edb59a0a0c95b9f7c0116e" exitCode=0 Oct 11 04:54:14 crc kubenswrapper[4798]: I1011 04:54:14.111330 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/tempest-tests-tempest" event={"ID":"b3b2c8d8-3245-48a3-be3a-099046cf7258","Type":"ContainerDied","Data":"7f8f349b21d5c10e3bac64ece6b39de4df0bb89dc8edb59a0a0c95b9f7c0116e"} Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.619481 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.645310 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3b2c8d8-3245-48a3-be3a-099046cf7258-config-data\") pod \"b3b2c8d8-3245-48a3-be3a-099046cf7258\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.645415 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b3b2c8d8-3245-48a3-be3a-099046cf7258-openstack-config\") pod \"b3b2c8d8-3245-48a3-be3a-099046cf7258\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.645522 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b3b2c8d8-3245-48a3-be3a-099046cf7258-test-operator-ephemeral-workdir\") pod \"b3b2c8d8-3245-48a3-be3a-099046cf7258\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.645660 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b3b2c8d8-3245-48a3-be3a-099046cf7258-test-operator-ephemeral-temporary\") pod \"b3b2c8d8-3245-48a3-be3a-099046cf7258\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.645904 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"b3b2c8d8-3245-48a3-be3a-099046cf7258\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.645993 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-openstack-config-secret\") pod \"b3b2c8d8-3245-48a3-be3a-099046cf7258\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.646066 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77cd5\" (UniqueName: \"kubernetes.io/projected/b3b2c8d8-3245-48a3-be3a-099046cf7258-kube-api-access-77cd5\") pod \"b3b2c8d8-3245-48a3-be3a-099046cf7258\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.646109 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-ssh-key\") pod \"b3b2c8d8-3245-48a3-be3a-099046cf7258\" (UID: \"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.646186 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-ca-certs\") pod \"b3b2c8d8-3245-48a3-be3a-099046cf7258\" (UID: 
\"b3b2c8d8-3245-48a3-be3a-099046cf7258\") " Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.646760 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3b2c8d8-3245-48a3-be3a-099046cf7258-config-data" (OuterVolumeSpecName: "config-data") pod "b3b2c8d8-3245-48a3-be3a-099046cf7258" (UID: "b3b2c8d8-3245-48a3-be3a-099046cf7258"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.647215 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3b2c8d8-3245-48a3-be3a-099046cf7258-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.648192 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3b2c8d8-3245-48a3-be3a-099046cf7258-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "b3b2c8d8-3245-48a3-be3a-099046cf7258" (UID: "b3b2c8d8-3245-48a3-be3a-099046cf7258"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.655569 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3b2c8d8-3245-48a3-be3a-099046cf7258-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "b3b2c8d8-3245-48a3-be3a-099046cf7258" (UID: "b3b2c8d8-3245-48a3-be3a-099046cf7258"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.656714 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3b2c8d8-3245-48a3-be3a-099046cf7258-kube-api-access-77cd5" (OuterVolumeSpecName: "kube-api-access-77cd5") pod "b3b2c8d8-3245-48a3-be3a-099046cf7258" (UID: "b3b2c8d8-3245-48a3-be3a-099046cf7258"). InnerVolumeSpecName "kube-api-access-77cd5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.662912 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "test-operator-logs") pod "b3b2c8d8-3245-48a3-be3a-099046cf7258" (UID: "b3b2c8d8-3245-48a3-be3a-099046cf7258"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.688223 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "b3b2c8d8-3245-48a3-be3a-099046cf7258" (UID: "b3b2c8d8-3245-48a3-be3a-099046cf7258"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.693468 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b3b2c8d8-3245-48a3-be3a-099046cf7258" (UID: "b3b2c8d8-3245-48a3-be3a-099046cf7258"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.699742 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "b3b2c8d8-3245-48a3-be3a-099046cf7258" (UID: "b3b2c8d8-3245-48a3-be3a-099046cf7258"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.744286 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3b2c8d8-3245-48a3-be3a-099046cf7258-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "b3b2c8d8-3245-48a3-be3a-099046cf7258" (UID: "b3b2c8d8-3245-48a3-be3a-099046cf7258"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.748354 4798 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-ca-certs\") on node \"crc\" DevicePath \"\"" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.748424 4798 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b3b2c8d8-3245-48a3-be3a-099046cf7258-openstack-config\") on node \"crc\" DevicePath \"\"" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.748441 4798 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b3b2c8d8-3245-48a3-be3a-099046cf7258-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.748454 4798 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b3b2c8d8-3245-48a3-be3a-099046cf7258-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.748502 4798 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.748518 4798 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.748533 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77cd5\" (UniqueName: \"kubernetes.io/projected/b3b2c8d8-3245-48a3-be3a-099046cf7258-kube-api-access-77cd5\") on node \"crc\" DevicePath \"\"" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.748544 4798 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3b2c8d8-3245-48a3-be3a-099046cf7258-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.780025 4798 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Oct 11 04:54:15 crc kubenswrapper[4798]: I1011 04:54:15.850988 4798 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Oct 11 04:54:16 crc kubenswrapper[4798]: I1011 04:54:16.135966 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"b3b2c8d8-3245-48a3-be3a-099046cf7258","Type":"ContainerDied","Data":"c2019c07467b5462271a938e008560e2bb76709f3638f79fdcbf99b755452c32"} Oct 11 04:54:16 crc kubenswrapper[4798]: I1011 04:54:16.136020 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2019c07467b5462271a938e008560e2bb76709f3638f79fdcbf99b755452c32" Oct 11 04:54:16 crc kubenswrapper[4798]: I1011 04:54:16.136054 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.197213 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Oct 11 04:54:18 crc kubenswrapper[4798]: E1011 04:54:18.199248 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3938fb90-a727-4764-addb-42aa76b61bab" containerName="registry-server" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.199327 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3938fb90-a727-4764-addb-42aa76b61bab" containerName="registry-server" Oct 11 04:54:18 crc kubenswrapper[4798]: E1011 04:54:18.199482 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3b2c8d8-3245-48a3-be3a-099046cf7258" containerName="tempest-tests-tempest-tests-runner" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.199549 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3b2c8d8-3245-48a3-be3a-099046cf7258" containerName="tempest-tests-tempest-tests-runner" Oct 11 04:54:18 crc kubenswrapper[4798]: E1011 04:54:18.199632 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3938fb90-a727-4764-addb-42aa76b61bab" containerName="extract-content" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.199687 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3938fb90-a727-4764-addb-42aa76b61bab" containerName="extract-content" Oct 11 04:54:18 crc kubenswrapper[4798]: E1011 04:54:18.199773 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3938fb90-a727-4764-addb-42aa76b61bab" containerName="extract-utilities" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.199829 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="3938fb90-a727-4764-addb-42aa76b61bab" containerName="extract-utilities" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.200076 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="3938fb90-a727-4764-addb-42aa76b61bab" containerName="registry-server" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.200156 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3b2c8d8-3245-48a3-be3a-099046cf7258" containerName="tempest-tests-tempest-tests-runner" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.201038 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.204502 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5wwt4" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.221050 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.315115 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxnn5\" (UniqueName: \"kubernetes.io/projected/b522303e-6696-4b73-87a2-127b91d444af-kube-api-access-mxnn5\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b522303e-6696-4b73-87a2-127b91d444af\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.315605 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b522303e-6696-4b73-87a2-127b91d444af\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.418484 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxnn5\" (UniqueName: \"kubernetes.io/projected/b522303e-6696-4b73-87a2-127b91d444af-kube-api-access-mxnn5\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b522303e-6696-4b73-87a2-127b91d444af\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.418593 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b522303e-6696-4b73-87a2-127b91d444af\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.419538 4798 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b522303e-6696-4b73-87a2-127b91d444af\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.453599 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b522303e-6696-4b73-87a2-127b91d444af\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 11 04:54:18 crc kubenswrapper[4798]: I1011 04:54:18.463186 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxnn5\" (UniqueName: \"kubernetes.io/projected/b522303e-6696-4b73-87a2-127b91d444af-kube-api-access-mxnn5\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b522303e-6696-4b73-87a2-127b91d444af\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 11 04:54:18 crc 
kubenswrapper[4798]: I1011 04:54:18.524169 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 11 04:54:19 crc kubenswrapper[4798]: I1011 04:54:19.112690 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Oct 11 04:54:19 crc kubenswrapper[4798]: I1011 04:54:19.188606 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"b522303e-6696-4b73-87a2-127b91d444af","Type":"ContainerStarted","Data":"f0e9fa611482bc0914a25372ae2492d37c74adf13359bf07273d19a67407fb8a"} Oct 11 04:54:21 crc kubenswrapper[4798]: I1011 04:54:21.219243 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"b522303e-6696-4b73-87a2-127b91d444af","Type":"ContainerStarted","Data":"68e31bb0ce69d5e89fa5c0938a1a821fedbdf53f8459d2896e94c450852bf936"} Oct 11 04:54:23 crc kubenswrapper[4798]: I1011 04:54:23.423784 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d" Oct 11 04:54:23 crc kubenswrapper[4798]: E1011 04:54:23.424431 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:54:35 crc kubenswrapper[4798]: I1011 04:54:35.430645 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d" Oct 11 04:54:35 crc kubenswrapper[4798]: E1011 04:54:35.432345 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:54:37 crc kubenswrapper[4798]: I1011 04:54:37.909495 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=18.791022095 podStartE2EDuration="19.909470552s" podCreationTimestamp="2025-10-11 04:54:18 +0000 UTC" firstStartedPulling="2025-10-11 04:54:19.131915286 +0000 UTC m=+3554.468204972" lastFinishedPulling="2025-10-11 04:54:20.250363743 +0000 UTC m=+3555.586653429" observedRunningTime="2025-10-11 04:54:21.251254185 +0000 UTC m=+3556.587543891" watchObservedRunningTime="2025-10-11 04:54:37.909470552 +0000 UTC m=+3573.245760238" Oct 11 04:54:37 crc kubenswrapper[4798]: I1011 04:54:37.911046 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6rjbn/must-gather-wwgth"] Oct 11 04:54:37 crc kubenswrapper[4798]: I1011 04:54:37.913292 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6rjbn/must-gather-wwgth" Oct 11 04:54:37 crc kubenswrapper[4798]: I1011 04:54:37.915823 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6rjbn"/"openshift-service-ca.crt" Oct 11 04:54:37 crc kubenswrapper[4798]: I1011 04:54:37.916031 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-6rjbn"/"default-dockercfg-7fk7h" Oct 11 04:54:37 crc kubenswrapper[4798]: I1011 04:54:37.916236 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6rjbn"/"kube-root-ca.crt" Oct 11 04:54:37 crc kubenswrapper[4798]: I1011 04:54:37.924178 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6rjbn/must-gather-wwgth"] Oct 11 04:54:37 crc kubenswrapper[4798]: I1011 04:54:37.976164 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpxzp\" (UniqueName: \"kubernetes.io/projected/2d61516a-feb6-4497-8e4c-4c91cb6618e9-kube-api-access-fpxzp\") pod \"must-gather-wwgth\" (UID: \"2d61516a-feb6-4497-8e4c-4c91cb6618e9\") " pod="openshift-must-gather-6rjbn/must-gather-wwgth" Oct 11 04:54:37 crc kubenswrapper[4798]: I1011 04:54:37.976232 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2d61516a-feb6-4497-8e4c-4c91cb6618e9-must-gather-output\") pod \"must-gather-wwgth\" (UID: \"2d61516a-feb6-4497-8e4c-4c91cb6618e9\") " pod="openshift-must-gather-6rjbn/must-gather-wwgth" Oct 11 04:54:38 crc kubenswrapper[4798]: I1011 04:54:38.078889 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpxzp\" (UniqueName: \"kubernetes.io/projected/2d61516a-feb6-4497-8e4c-4c91cb6618e9-kube-api-access-fpxzp\") pod \"must-gather-wwgth\" (UID: \"2d61516a-feb6-4497-8e4c-4c91cb6618e9\") " pod="openshift-must-gather-6rjbn/must-gather-wwgth" Oct 11 04:54:38 crc kubenswrapper[4798]: I1011 04:54:38.078978 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2d61516a-feb6-4497-8e4c-4c91cb6618e9-must-gather-output\") pod \"must-gather-wwgth\" (UID: \"2d61516a-feb6-4497-8e4c-4c91cb6618e9\") " pod="openshift-must-gather-6rjbn/must-gather-wwgth" Oct 11 04:54:38 crc kubenswrapper[4798]: I1011 04:54:38.079726 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2d61516a-feb6-4497-8e4c-4c91cb6618e9-must-gather-output\") pod \"must-gather-wwgth\" (UID: \"2d61516a-feb6-4497-8e4c-4c91cb6618e9\") " pod="openshift-must-gather-6rjbn/must-gather-wwgth" Oct 11 04:54:38 crc kubenswrapper[4798]: I1011 04:54:38.101686 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpxzp\" (UniqueName: \"kubernetes.io/projected/2d61516a-feb6-4497-8e4c-4c91cb6618e9-kube-api-access-fpxzp\") pod \"must-gather-wwgth\" (UID: \"2d61516a-feb6-4497-8e4c-4c91cb6618e9\") " pod="openshift-must-gather-6rjbn/must-gather-wwgth" Oct 11 04:54:38 crc kubenswrapper[4798]: I1011 04:54:38.235884 4798 util.go:30] "No sandbox for pod can be found. 
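Note on the recurring back-off entries (not part of the recorded log): the machine-config-daemon container stays in CrashLoopBackOff throughout this window; the kubelet re-logs "Error syncing pod, skipping" with "back-off 5m0s" on each retry. A minimal sketch, assuming the capture is saved as kubelet.log, that extracts the retry cadence (again matching the kubelet's literal \" escaping in the raw lines):

import re

BACKOFF = re.compile(
    r'CrashLoopBackOff: \\"back-off (\S+) restarting failed container=(\S+) pod=(\S+?)\\"')

with open("kubelet.log", encoding="utf-8", errors="replace") as f:
    for line in f:
        m = BACKOFF.search(line)
        if m:
            delay, container, pod = m.groups()
            # line[:15] is the syslog timestamp, e.g. "Oct 11 04:54:49"
            print(f"{line[:15]}  {container}  back-off {delay}")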
Oct 11 04:54:38 crc kubenswrapper[4798]: I1011 04:54:38.235884 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rjbn/must-gather-wwgth"
Oct 11 04:54:38 crc kubenswrapper[4798]: I1011 04:54:38.769033 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6rjbn/must-gather-wwgth"]
Oct 11 04:54:38 crc kubenswrapper[4798]: W1011 04:54:38.776610 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d61516a_feb6_4497_8e4c_4c91cb6618e9.slice/crio-53b19ca59ef214ed764c273a2d4614165d5b72620863b45d1b9408003b673a81 WatchSource:0}: Error finding container 53b19ca59ef214ed764c273a2d4614165d5b72620863b45d1b9408003b673a81: Status 404 returned error can't find the container with id 53b19ca59ef214ed764c273a2d4614165d5b72620863b45d1b9408003b673a81
Oct 11 04:54:39 crc kubenswrapper[4798]: I1011 04:54:39.485035 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rjbn/must-gather-wwgth" event={"ID":"2d61516a-feb6-4497-8e4c-4c91cb6618e9","Type":"ContainerStarted","Data":"53b19ca59ef214ed764c273a2d4614165d5b72620863b45d1b9408003b673a81"}
Oct 11 04:54:46 crc kubenswrapper[4798]: I1011 04:54:46.574237 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rjbn/must-gather-wwgth" event={"ID":"2d61516a-feb6-4497-8e4c-4c91cb6618e9","Type":"ContainerStarted","Data":"2d5e3bbc4db91851838d6c3e19a820a15212baf498672037b67d05ce2575dd99"}
Oct 11 04:54:46 crc kubenswrapper[4798]: I1011 04:54:46.575071 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rjbn/must-gather-wwgth" event={"ID":"2d61516a-feb6-4497-8e4c-4c91cb6618e9","Type":"ContainerStarted","Data":"6837eaa8e60b27d5c42d5c6d75533620642989586fd991e5ec749c6be19ba4a5"}
Oct 11 04:54:46 crc kubenswrapper[4798]: I1011 04:54:46.597375 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6rjbn/must-gather-wwgth" podStartSLOduration=3.006823606 podStartE2EDuration="9.597352372s" podCreationTimestamp="2025-10-11 04:54:37 +0000 UTC" firstStartedPulling="2025-10-11 04:54:38.78002517 +0000 UTC m=+3574.116314876" lastFinishedPulling="2025-10-11 04:54:45.370553946 +0000 UTC m=+3580.706843642" observedRunningTime="2025-10-11 04:54:46.596046301 +0000 UTC m=+3581.932335987" watchObservedRunningTime="2025-10-11 04:54:46.597352372 +0000 UTC m=+3581.933642058"
Oct 11 04:54:49 crc kubenswrapper[4798]: I1011 04:54:49.424629 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:54:49 crc kubenswrapper[4798]: E1011 04:54:49.425638 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:54:50 crc kubenswrapper[4798]: I1011 04:54:50.047975 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6rjbn/crc-debug-bnrmz"]
Oct 11 04:54:50 crc kubenswrapper[4798]: I1011 04:54:50.050139 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rjbn/crc-debug-bnrmz"
Oct 11 04:54:50 crc kubenswrapper[4798]: I1011 04:54:50.097797 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2b413609-1dea-42b9-9803-e4cf06cc3f02-host\") pod \"crc-debug-bnrmz\" (UID: \"2b413609-1dea-42b9-9803-e4cf06cc3f02\") " pod="openshift-must-gather-6rjbn/crc-debug-bnrmz"
Oct 11 04:54:50 crc kubenswrapper[4798]: I1011 04:54:50.097900 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llxgk\" (UniqueName: \"kubernetes.io/projected/2b413609-1dea-42b9-9803-e4cf06cc3f02-kube-api-access-llxgk\") pod \"crc-debug-bnrmz\" (UID: \"2b413609-1dea-42b9-9803-e4cf06cc3f02\") " pod="openshift-must-gather-6rjbn/crc-debug-bnrmz"
Oct 11 04:54:50 crc kubenswrapper[4798]: I1011 04:54:50.200377 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2b413609-1dea-42b9-9803-e4cf06cc3f02-host\") pod \"crc-debug-bnrmz\" (UID: \"2b413609-1dea-42b9-9803-e4cf06cc3f02\") " pod="openshift-must-gather-6rjbn/crc-debug-bnrmz"
Oct 11 04:54:50 crc kubenswrapper[4798]: I1011 04:54:50.200466 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llxgk\" (UniqueName: \"kubernetes.io/projected/2b413609-1dea-42b9-9803-e4cf06cc3f02-kube-api-access-llxgk\") pod \"crc-debug-bnrmz\" (UID: \"2b413609-1dea-42b9-9803-e4cf06cc3f02\") " pod="openshift-must-gather-6rjbn/crc-debug-bnrmz"
Oct 11 04:54:50 crc kubenswrapper[4798]: I1011 04:54:50.200478 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2b413609-1dea-42b9-9803-e4cf06cc3f02-host\") pod \"crc-debug-bnrmz\" (UID: \"2b413609-1dea-42b9-9803-e4cf06cc3f02\") " pod="openshift-must-gather-6rjbn/crc-debug-bnrmz"
Oct 11 04:54:50 crc kubenswrapper[4798]: I1011 04:54:50.223470 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llxgk\" (UniqueName: \"kubernetes.io/projected/2b413609-1dea-42b9-9803-e4cf06cc3f02-kube-api-access-llxgk\") pod \"crc-debug-bnrmz\" (UID: \"2b413609-1dea-42b9-9803-e4cf06cc3f02\") " pod="openshift-must-gather-6rjbn/crc-debug-bnrmz"
Oct 11 04:54:50 crc kubenswrapper[4798]: I1011 04:54:50.382088 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rjbn/crc-debug-bnrmz"
Oct 11 04:54:50 crc kubenswrapper[4798]: I1011 04:54:50.616054 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rjbn/crc-debug-bnrmz" event={"ID":"2b413609-1dea-42b9-9803-e4cf06cc3f02","Type":"ContainerStarted","Data":"01ed07788398fa6c17be30739115355f6c85a85f51a4a168174cd24864d19976"}
Oct 11 04:55:02 crc kubenswrapper[4798]: I1011 04:55:02.424121 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:55:02 crc kubenswrapper[4798]: E1011 04:55:02.424991 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:55:03 crc kubenswrapper[4798]: I1011 04:55:03.798664 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rjbn/crc-debug-bnrmz" event={"ID":"2b413609-1dea-42b9-9803-e4cf06cc3f02","Type":"ContainerStarted","Data":"2719b3cbd22c3fc963f31763151aa39a556a330e954cb5a8392d51bd71980006"}
Oct 11 04:55:03 crc kubenswrapper[4798]: I1011 04:55:03.828526 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6rjbn/crc-debug-bnrmz" podStartSLOduration=1.481227598 podStartE2EDuration="13.828497584s" podCreationTimestamp="2025-10-11 04:54:50 +0000 UTC" firstStartedPulling="2025-10-11 04:54:50.421342433 +0000 UTC m=+3585.757632119" lastFinishedPulling="2025-10-11 04:55:02.768612429 +0000 UTC m=+3598.104902105" observedRunningTime="2025-10-11 04:55:03.818048698 +0000 UTC m=+3599.154338424" watchObservedRunningTime="2025-10-11 04:55:03.828497584 +0000 UTC m=+3599.164787300"
Oct 11 04:55:16 crc kubenswrapper[4798]: I1011 04:55:16.424080 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:55:16 crc kubenswrapper[4798]: E1011 04:55:16.425120 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:55:28 crc kubenswrapper[4798]: I1011 04:55:28.424676 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:55:28 crc kubenswrapper[4798]: E1011 04:55:28.425729 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:55:39 crc kubenswrapper[4798]: I1011 04:55:39.425139 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:55:39 crc kubenswrapper[4798]: E1011 04:55:39.426381 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:55:41 crc kubenswrapper[4798]: E1011 04:55:41.031675 4798 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b413609_1dea_42b9_9803_e4cf06cc3f02.slice/crio-conmon-2719b3cbd22c3fc963f31763151aa39a556a330e954cb5a8392d51bd71980006.scope\": RecentStats: unable to find data in memory cache]"
Oct 11 04:55:41 crc kubenswrapper[4798]: I1011 04:55:41.189281 4798 generic.go:334] "Generic (PLEG): container finished" podID="2b413609-1dea-42b9-9803-e4cf06cc3f02" containerID="2719b3cbd22c3fc963f31763151aa39a556a330e954cb5a8392d51bd71980006" exitCode=0
Oct 11 04:55:41 crc kubenswrapper[4798]: I1011 04:55:41.189768 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rjbn/crc-debug-bnrmz" event={"ID":"2b413609-1dea-42b9-9803-e4cf06cc3f02","Type":"ContainerDied","Data":"2719b3cbd22c3fc963f31763151aa39a556a330e954cb5a8392d51bd71980006"}
Oct 11 04:55:42 crc kubenswrapper[4798]: I1011 04:55:42.336901 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rjbn/crc-debug-bnrmz"
Oct 11 04:55:42 crc kubenswrapper[4798]: I1011 04:55:42.377956 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6rjbn/crc-debug-bnrmz"]
Oct 11 04:55:42 crc kubenswrapper[4798]: I1011 04:55:42.387271 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6rjbn/crc-debug-bnrmz"]
Oct 11 04:55:42 crc kubenswrapper[4798]: I1011 04:55:42.460223 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2b413609-1dea-42b9-9803-e4cf06cc3f02-host\") pod \"2b413609-1dea-42b9-9803-e4cf06cc3f02\" (UID: \"2b413609-1dea-42b9-9803-e4cf06cc3f02\") "
Oct 11 04:55:42 crc kubenswrapper[4798]: I1011 04:55:42.460405 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2b413609-1dea-42b9-9803-e4cf06cc3f02-host" (OuterVolumeSpecName: "host") pod "2b413609-1dea-42b9-9803-e4cf06cc3f02" (UID: "2b413609-1dea-42b9-9803-e4cf06cc3f02"). InnerVolumeSpecName "host".
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:55:42 crc kubenswrapper[4798]: I1011 04:55:42.460470 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llxgk\" (UniqueName: \"kubernetes.io/projected/2b413609-1dea-42b9-9803-e4cf06cc3f02-kube-api-access-llxgk\") pod \"2b413609-1dea-42b9-9803-e4cf06cc3f02\" (UID: \"2b413609-1dea-42b9-9803-e4cf06cc3f02\") " Oct 11 04:55:42 crc kubenswrapper[4798]: I1011 04:55:42.461260 4798 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2b413609-1dea-42b9-9803-e4cf06cc3f02-host\") on node \"crc\" DevicePath \"\"" Oct 11 04:55:42 crc kubenswrapper[4798]: I1011 04:55:42.467884 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b413609-1dea-42b9-9803-e4cf06cc3f02-kube-api-access-llxgk" (OuterVolumeSpecName: "kube-api-access-llxgk") pod "2b413609-1dea-42b9-9803-e4cf06cc3f02" (UID: "2b413609-1dea-42b9-9803-e4cf06cc3f02"). InnerVolumeSpecName "kube-api-access-llxgk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:55:42 crc kubenswrapper[4798]: I1011 04:55:42.564457 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llxgk\" (UniqueName: \"kubernetes.io/projected/2b413609-1dea-42b9-9803-e4cf06cc3f02-kube-api-access-llxgk\") on node \"crc\" DevicePath \"\"" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.216118 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="01ed07788398fa6c17be30739115355f6c85a85f51a4a168174cd24864d19976" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.216155 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rjbn/crc-debug-bnrmz" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.443070 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b413609-1dea-42b9-9803-e4cf06cc3f02" path="/var/lib/kubelet/pods/2b413609-1dea-42b9-9803-e4cf06cc3f02/volumes" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.618181 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6rjbn/crc-debug-mvc8m"] Oct 11 04:55:43 crc kubenswrapper[4798]: E1011 04:55:43.618694 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b413609-1dea-42b9-9803-e4cf06cc3f02" containerName="container-00" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.618714 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b413609-1dea-42b9-9803-e4cf06cc3f02" containerName="container-00" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.618905 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b413609-1dea-42b9-9803-e4cf06cc3f02" containerName="container-00" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.621246 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6rjbn/crc-debug-mvc8m" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.685662 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2-host\") pod \"crc-debug-mvc8m\" (UID: \"f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2\") " pod="openshift-must-gather-6rjbn/crc-debug-mvc8m" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.685744 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgt74\" (UniqueName: \"kubernetes.io/projected/f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2-kube-api-access-xgt74\") pod \"crc-debug-mvc8m\" (UID: \"f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2\") " pod="openshift-must-gather-6rjbn/crc-debug-mvc8m" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.788213 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2-host\") pod \"crc-debug-mvc8m\" (UID: \"f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2\") " pod="openshift-must-gather-6rjbn/crc-debug-mvc8m" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.788322 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgt74\" (UniqueName: \"kubernetes.io/projected/f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2-kube-api-access-xgt74\") pod \"crc-debug-mvc8m\" (UID: \"f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2\") " pod="openshift-must-gather-6rjbn/crc-debug-mvc8m" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.788370 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2-host\") pod \"crc-debug-mvc8m\" (UID: \"f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2\") " pod="openshift-must-gather-6rjbn/crc-debug-mvc8m" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.810973 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgt74\" (UniqueName: \"kubernetes.io/projected/f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2-kube-api-access-xgt74\") pod \"crc-debug-mvc8m\" (UID: \"f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2\") " pod="openshift-must-gather-6rjbn/crc-debug-mvc8m" Oct 11 04:55:43 crc kubenswrapper[4798]: I1011 04:55:43.941519 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6rjbn/crc-debug-mvc8m" Oct 11 04:55:44 crc kubenswrapper[4798]: I1011 04:55:44.225438 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rjbn/crc-debug-mvc8m" event={"ID":"f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2","Type":"ContainerStarted","Data":"e9d0fc8293cd020811bc90ec58e0b55c113cb0f8f7d2c423f34666f10007c95a"} Oct 11 04:55:45 crc kubenswrapper[4798]: I1011 04:55:45.239421 4798 generic.go:334] "Generic (PLEG): container finished" podID="f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2" containerID="cd6cc40464096d3d36577b64feeb77499d4710bfbc6c22cfc2336b869a9f67a7" exitCode=0 Oct 11 04:55:45 crc kubenswrapper[4798]: I1011 04:55:45.239548 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rjbn/crc-debug-mvc8m" event={"ID":"f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2","Type":"ContainerDied","Data":"cd6cc40464096d3d36577b64feeb77499d4710bfbc6c22cfc2336b869a9f67a7"} Oct 11 04:55:45 crc kubenswrapper[4798]: I1011 04:55:45.670051 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6rjbn/crc-debug-mvc8m"] Oct 11 04:55:45 crc kubenswrapper[4798]: I1011 04:55:45.681558 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6rjbn/crc-debug-mvc8m"] Oct 11 04:55:46 crc kubenswrapper[4798]: I1011 04:55:46.350306 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rjbn/crc-debug-mvc8m" Oct 11 04:55:46 crc kubenswrapper[4798]: I1011 04:55:46.447976 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2-host\") pod \"f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2\" (UID: \"f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2\") " Oct 11 04:55:46 crc kubenswrapper[4798]: I1011 04:55:46.448175 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2-host" (OuterVolumeSpecName: "host") pod "f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2" (UID: "f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:55:46 crc kubenswrapper[4798]: I1011 04:55:46.448704 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgt74\" (UniqueName: \"kubernetes.io/projected/f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2-kube-api-access-xgt74\") pod \"f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2\" (UID: \"f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2\") " Oct 11 04:55:46 crc kubenswrapper[4798]: I1011 04:55:46.449190 4798 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2-host\") on node \"crc\" DevicePath \"\"" Oct 11 04:55:46 crc kubenswrapper[4798]: I1011 04:55:46.472706 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2-kube-api-access-xgt74" (OuterVolumeSpecName: "kube-api-access-xgt74") pod "f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2" (UID: "f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2"). InnerVolumeSpecName "kube-api-access-xgt74". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:55:46 crc kubenswrapper[4798]: I1011 04:55:46.552202 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgt74\" (UniqueName: \"kubernetes.io/projected/f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2-kube-api-access-xgt74\") on node \"crc\" DevicePath \"\"" Oct 11 04:55:46 crc kubenswrapper[4798]: I1011 04:55:46.961904 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6rjbn/crc-debug-mh4fk"] Oct 11 04:55:46 crc kubenswrapper[4798]: E1011 04:55:46.962483 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2" containerName="container-00" Oct 11 04:55:46 crc kubenswrapper[4798]: I1011 04:55:46.962507 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2" containerName="container-00" Oct 11 04:55:46 crc kubenswrapper[4798]: I1011 04:55:46.962700 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2" containerName="container-00" Oct 11 04:55:46 crc kubenswrapper[4798]: I1011 04:55:46.963697 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rjbn/crc-debug-mh4fk" Oct 11 04:55:47 crc kubenswrapper[4798]: I1011 04:55:47.065541 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1f7f106c-2d8f-4871-9618-605d9d5090e8-host\") pod \"crc-debug-mh4fk\" (UID: \"1f7f106c-2d8f-4871-9618-605d9d5090e8\") " pod="openshift-must-gather-6rjbn/crc-debug-mh4fk" Oct 11 04:55:47 crc kubenswrapper[4798]: I1011 04:55:47.065651 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9lxb\" (UniqueName: \"kubernetes.io/projected/1f7f106c-2d8f-4871-9618-605d9d5090e8-kube-api-access-k9lxb\") pod \"crc-debug-mh4fk\" (UID: \"1f7f106c-2d8f-4871-9618-605d9d5090e8\") " pod="openshift-must-gather-6rjbn/crc-debug-mh4fk" Oct 11 04:55:47 crc kubenswrapper[4798]: I1011 04:55:47.168499 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9lxb\" (UniqueName: \"kubernetes.io/projected/1f7f106c-2d8f-4871-9618-605d9d5090e8-kube-api-access-k9lxb\") pod \"crc-debug-mh4fk\" (UID: \"1f7f106c-2d8f-4871-9618-605d9d5090e8\") " pod="openshift-must-gather-6rjbn/crc-debug-mh4fk" Oct 11 04:55:47 crc kubenswrapper[4798]: I1011 04:55:47.169413 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1f7f106c-2d8f-4871-9618-605d9d5090e8-host\") pod \"crc-debug-mh4fk\" (UID: \"1f7f106c-2d8f-4871-9618-605d9d5090e8\") " pod="openshift-must-gather-6rjbn/crc-debug-mh4fk" Oct 11 04:55:47 crc kubenswrapper[4798]: I1011 04:55:47.169586 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1f7f106c-2d8f-4871-9618-605d9d5090e8-host\") pod \"crc-debug-mh4fk\" (UID: \"1f7f106c-2d8f-4871-9618-605d9d5090e8\") " pod="openshift-must-gather-6rjbn/crc-debug-mh4fk" Oct 11 04:55:47 crc kubenswrapper[4798]: I1011 04:55:47.189956 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9lxb\" (UniqueName: \"kubernetes.io/projected/1f7f106c-2d8f-4871-9618-605d9d5090e8-kube-api-access-k9lxb\") pod \"crc-debug-mh4fk\" (UID: \"1f7f106c-2d8f-4871-9618-605d9d5090e8\") " 
pod="openshift-must-gather-6rjbn/crc-debug-mh4fk" Oct 11 04:55:47 crc kubenswrapper[4798]: I1011 04:55:47.263899 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9d0fc8293cd020811bc90ec58e0b55c113cb0f8f7d2c423f34666f10007c95a" Oct 11 04:55:47 crc kubenswrapper[4798]: I1011 04:55:47.263977 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rjbn/crc-debug-mvc8m" Oct 11 04:55:47 crc kubenswrapper[4798]: I1011 04:55:47.280253 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rjbn/crc-debug-mh4fk" Oct 11 04:55:47 crc kubenswrapper[4798]: W1011 04:55:47.318010 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1f7f106c_2d8f_4871_9618_605d9d5090e8.slice/crio-8e0e884e2bb6aa5df8a00d3610a434020b6b129055c963067a4abd6fb5c6e57a WatchSource:0}: Error finding container 8e0e884e2bb6aa5df8a00d3610a434020b6b129055c963067a4abd6fb5c6e57a: Status 404 returned error can't find the container with id 8e0e884e2bb6aa5df8a00d3610a434020b6b129055c963067a4abd6fb5c6e57a Oct 11 04:55:47 crc kubenswrapper[4798]: I1011 04:55:47.441026 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2" path="/var/lib/kubelet/pods/f2bda97c-c22b-4c25-8754-3ed1fbfa0ff2/volumes" Oct 11 04:55:48 crc kubenswrapper[4798]: I1011 04:55:48.275441 4798 generic.go:334] "Generic (PLEG): container finished" podID="1f7f106c-2d8f-4871-9618-605d9d5090e8" containerID="2009b1a50368cca86e50560c68c6efc1559ae717ba1d25e68e59dc0a2584ee7d" exitCode=0 Oct 11 04:55:48 crc kubenswrapper[4798]: I1011 04:55:48.275528 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rjbn/crc-debug-mh4fk" event={"ID":"1f7f106c-2d8f-4871-9618-605d9d5090e8","Type":"ContainerDied","Data":"2009b1a50368cca86e50560c68c6efc1559ae717ba1d25e68e59dc0a2584ee7d"} Oct 11 04:55:48 crc kubenswrapper[4798]: I1011 04:55:48.276074 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rjbn/crc-debug-mh4fk" event={"ID":"1f7f106c-2d8f-4871-9618-605d9d5090e8","Type":"ContainerStarted","Data":"8e0e884e2bb6aa5df8a00d3610a434020b6b129055c963067a4abd6fb5c6e57a"} Oct 11 04:55:48 crc kubenswrapper[4798]: I1011 04:55:48.318744 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6rjbn/crc-debug-mh4fk"] Oct 11 04:55:48 crc kubenswrapper[4798]: I1011 04:55:48.327954 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6rjbn/crc-debug-mh4fk"] Oct 11 04:55:49 crc kubenswrapper[4798]: I1011 04:55:49.416215 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6rjbn/crc-debug-mh4fk" Oct 11 04:55:49 crc kubenswrapper[4798]: I1011 04:55:49.531804 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9lxb\" (UniqueName: \"kubernetes.io/projected/1f7f106c-2d8f-4871-9618-605d9d5090e8-kube-api-access-k9lxb\") pod \"1f7f106c-2d8f-4871-9618-605d9d5090e8\" (UID: \"1f7f106c-2d8f-4871-9618-605d9d5090e8\") " Oct 11 04:55:49 crc kubenswrapper[4798]: I1011 04:55:49.532140 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1f7f106c-2d8f-4871-9618-605d9d5090e8-host\") pod \"1f7f106c-2d8f-4871-9618-605d9d5090e8\" (UID: \"1f7f106c-2d8f-4871-9618-605d9d5090e8\") " Oct 11 04:55:49 crc kubenswrapper[4798]: I1011 04:55:49.533219 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1f7f106c-2d8f-4871-9618-605d9d5090e8-host" (OuterVolumeSpecName: "host") pod "1f7f106c-2d8f-4871-9618-605d9d5090e8" (UID: "1f7f106c-2d8f-4871-9618-605d9d5090e8"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 04:55:49 crc kubenswrapper[4798]: I1011 04:55:49.545236 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f7f106c-2d8f-4871-9618-605d9d5090e8-kube-api-access-k9lxb" (OuterVolumeSpecName: "kube-api-access-k9lxb") pod "1f7f106c-2d8f-4871-9618-605d9d5090e8" (UID: "1f7f106c-2d8f-4871-9618-605d9d5090e8"). InnerVolumeSpecName "kube-api-access-k9lxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 04:55:49 crc kubenswrapper[4798]: I1011 04:55:49.636033 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9lxb\" (UniqueName: \"kubernetes.io/projected/1f7f106c-2d8f-4871-9618-605d9d5090e8-kube-api-access-k9lxb\") on node \"crc\" DevicePath \"\"" Oct 11 04:55:49 crc kubenswrapper[4798]: I1011 04:55:49.636104 4798 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1f7f106c-2d8f-4871-9618-605d9d5090e8-host\") on node \"crc\" DevicePath \"\"" Oct 11 04:55:50 crc kubenswrapper[4798]: I1011 04:55:50.300566 4798 scope.go:117] "RemoveContainer" containerID="2009b1a50368cca86e50560c68c6efc1559ae717ba1d25e68e59dc0a2584ee7d" Oct 11 04:55:50 crc kubenswrapper[4798]: I1011 04:55:50.300646 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6rjbn/crc-debug-mh4fk" Oct 11 04:55:51 crc kubenswrapper[4798]: I1011 04:55:51.424576 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d" Oct 11 04:55:51 crc kubenswrapper[4798]: E1011 04:55:51.425416 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:55:51 crc kubenswrapper[4798]: I1011 04:55:51.435939 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f7f106c-2d8f-4871-9618-605d9d5090e8" path="/var/lib/kubelet/pods/1f7f106c-2d8f-4871-9618-605d9d5090e8/volumes" Oct 11 04:56:01 crc kubenswrapper[4798]: I1011 04:56:01.621537 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7466b4ffd-mfb97_6f108baa-f968-4e0a-8227-8cc389329a06/barbican-api/0.log" Oct 11 04:56:01 crc kubenswrapper[4798]: I1011 04:56:01.659177 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7466b4ffd-mfb97_6f108baa-f968-4e0a-8227-8cc389329a06/barbican-api-log/0.log" Oct 11 04:56:01 crc kubenswrapper[4798]: I1011 04:56:01.936101 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6574767f6b-8p24k_141c65d6-e8c4-4ae3-be7b-7adf75193efc/barbican-keystone-listener/0.log" Oct 11 04:56:01 crc kubenswrapper[4798]: I1011 04:56:01.952081 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6574767f6b-8p24k_141c65d6-e8c4-4ae3-be7b-7adf75193efc/barbican-keystone-listener-log/0.log" Oct 11 04:56:02 crc kubenswrapper[4798]: I1011 04:56:02.179034 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-55d589c759-9xthl_708609f6-405f-4f80-a2ed-e749a3803884/barbican-worker/0.log" Oct 11 04:56:02 crc kubenswrapper[4798]: I1011 04:56:02.237716 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-55d589c759-9xthl_708609f6-405f-4f80-a2ed-e749a3803884/barbican-worker-log/0.log" Oct 11 04:56:02 crc kubenswrapper[4798]: I1011 04:56:02.479369 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-msblf_6c616614-d70b-48a1-864f-df99b3bb33f3/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:02 crc kubenswrapper[4798]: I1011 04:56:02.681416 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f1864cfd-61a1-4cb5-ae10-af539f50abc4/ceilometer-central-agent/0.log" Oct 11 04:56:02 crc kubenswrapper[4798]: I1011 04:56:02.720065 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f1864cfd-61a1-4cb5-ae10-af539f50abc4/ceilometer-notification-agent/0.log" Oct 11 04:56:02 crc kubenswrapper[4798]: I1011 04:56:02.846004 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f1864cfd-61a1-4cb5-ae10-af539f50abc4/proxy-httpd/0.log" Oct 11 04:56:02 crc kubenswrapper[4798]: I1011 04:56:02.956915 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f1864cfd-61a1-4cb5-ae10-af539f50abc4/sg-core/0.log" Oct 
11 04:56:03 crc kubenswrapper[4798]: I1011 04:56:03.066079 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv_31303b8b-3524-40dd-9534-baccdc1d5a70/ceph-client-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:03 crc kubenswrapper[4798]: I1011 04:56:03.204808 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9_29216d22-5411-4bbd-b2c3-f643df3218c0/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:03 crc kubenswrapper[4798]: I1011 04:56:03.437123 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_b454f77b-5618-4f7e-b603-9fec6030c732/cinder-api/0.log" Oct 11 04:56:03 crc kubenswrapper[4798]: I1011 04:56:03.517368 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_b454f77b-5618-4f7e-b603-9fec6030c732/cinder-api-log/0.log" Oct 11 04:56:04 crc kubenswrapper[4798]: I1011 04:56:04.023143 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_f9dae869-d1e5-48c4-a08c-5f9e76f8a581/cinder-backup/0.log" Oct 11 04:56:04 crc kubenswrapper[4798]: I1011 04:56:04.065150 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_f9dae869-d1e5-48c4-a08c-5f9e76f8a581/probe/0.log" Oct 11 04:56:04 crc kubenswrapper[4798]: I1011 04:56:04.129949 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_b88dab9f-9394-41b9-a314-ce7e36e021d8/cinder-scheduler/0.log" Oct 11 04:56:04 crc kubenswrapper[4798]: I1011 04:56:04.271140 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_b88dab9f-9394-41b9-a314-ce7e36e021d8/probe/0.log" Oct 11 04:56:04 crc kubenswrapper[4798]: I1011 04:56:04.389748 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_6d69ada7-5b46-4e22-b304-8ca7957976c5/probe/0.log" Oct 11 04:56:04 crc kubenswrapper[4798]: I1011 04:56:04.433015 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_6d69ada7-5b46-4e22-b304-8ca7957976c5/cinder-volume/0.log" Oct 11 04:56:04 crc kubenswrapper[4798]: I1011 04:56:04.627852 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj_c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:04 crc kubenswrapper[4798]: I1011 04:56:04.669209 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-8vffc_bcf6a25c-81f9-4a1f-bf5e-9c15b703c682/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:04 crc kubenswrapper[4798]: I1011 04:56:04.881745 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-bvxl4_d5af4cb1-380b-4a81-87ff-b1eb52952273/init/0.log" Oct 11 04:56:05 crc kubenswrapper[4798]: I1011 04:56:05.118069 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-bvxl4_d5af4cb1-380b-4a81-87ff-b1eb52952273/dnsmasq-dns/0.log" Oct 11 04:56:05 crc kubenswrapper[4798]: I1011 04:56:05.177161 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-bvxl4_d5af4cb1-380b-4a81-87ff-b1eb52952273/init/0.log" Oct 11 04:56:05 crc kubenswrapper[4798]: I1011 04:56:05.189071 4798 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_4207ecba-8fbe-4317-9292-8b7fd0d4df8c/glance-httpd/0.log" Oct 11 04:56:05 crc kubenswrapper[4798]: I1011 04:56:05.209945 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_4207ecba-8fbe-4317-9292-8b7fd0d4df8c/glance-log/0.log" Oct 11 04:56:05 crc kubenswrapper[4798]: I1011 04:56:05.366798 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_53c9a7f3-ce03-406a-8c8a-56f59838bf6b/glance-httpd/0.log" Oct 11 04:56:05 crc kubenswrapper[4798]: I1011 04:56:05.411290 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_53c9a7f3-ce03-406a-8c8a-56f59838bf6b/glance-log/0.log" Oct 11 04:56:05 crc kubenswrapper[4798]: I1011 04:56:05.575337 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7b5998ffb-nn2rn_a3fe5ce5-561f-4775-8024-6c4896079b30/horizon/0.log" Oct 11 04:56:05 crc kubenswrapper[4798]: I1011 04:56:05.663930 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7b5998ffb-nn2rn_a3fe5ce5-561f-4775-8024-6c4896079b30/horizon-log/0.log" Oct 11 04:56:05 crc kubenswrapper[4798]: I1011 04:56:05.719843 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f_5f08cfce-c132-471d-b2c8-e725c0c5b7ec/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:05 crc kubenswrapper[4798]: I1011 04:56:05.780203 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-4fcfm_c8977d13-c86f-494d-a02e-b617e5e27fdb/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:06 crc kubenswrapper[4798]: I1011 04:56:06.026717 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_a0b5cce2-9d60-4726-9660-bac9df48774f/kube-state-metrics/0.log" Oct 11 04:56:06 crc kubenswrapper[4798]: I1011 04:56:06.107900 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-bf585fdff-grnph_0d75e3b2-e53b-4823-9272-b7038c8b379c/keystone-api/0.log" Oct 11 04:56:06 crc kubenswrapper[4798]: I1011 04:56:06.192919 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-649lj_8550f028-bbd9-455c-9c93-0e813f2a95ed/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:06 crc kubenswrapper[4798]: I1011 04:56:06.304992 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01/manila-api-log/0.log" Oct 11 04:56:06 crc kubenswrapper[4798]: I1011 04:56:06.342342 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01/manila-api/0.log" Oct 11 04:56:06 crc kubenswrapper[4798]: I1011 04:56:06.406180 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-db-create-b4ct7_b663620b-a5c4-40ff-a611-749eecb67c0a/mariadb-database-create/0.log" Oct 11 04:56:06 crc kubenswrapper[4798]: I1011 04:56:06.423736 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d" Oct 11 04:56:06 crc kubenswrapper[4798]: E1011 04:56:06.424029 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:56:06 crc kubenswrapper[4798]: I1011 04:56:06.565611 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-db-sync-2qjln_24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2/manila-db-sync/0.log" Oct 11 04:56:06 crc kubenswrapper[4798]: I1011 04:56:06.672245 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-dd57-account-create-bdglj_96e78519-e8bc-4849-894c-781a43a4bbf7/mariadb-account-create/0.log" Oct 11 04:56:06 crc kubenswrapper[4798]: I1011 04:56:06.781188 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_8a3790b4-be7a-434f-90c1-3c0b6623b2a5/probe/0.log" Oct 11 04:56:06 crc kubenswrapper[4798]: I1011 04:56:06.862182 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_8a3790b4-be7a-434f-90c1-3c0b6623b2a5/manila-scheduler/0.log" Oct 11 04:56:06 crc kubenswrapper[4798]: I1011 04:56:06.935504 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582/probe/0.log" Oct 11 04:56:06 crc kubenswrapper[4798]: I1011 04:56:06.972547 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582/manila-share/0.log" Oct 11 04:56:07 crc kubenswrapper[4798]: I1011 04:56:07.244527 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5744f55857-rnj42_985c3452-9836-40f4-8b42-8082b04ffba3/neutron-api/0.log" Oct 11 04:56:07 crc kubenswrapper[4798]: I1011 04:56:07.274793 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5744f55857-rnj42_985c3452-9836-40f4-8b42-8082b04ffba3/neutron-httpd/0.log" Oct 11 04:56:07 crc kubenswrapper[4798]: I1011 04:56:07.429879 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_a3433fc1-acd1-4b1e-9df2-578848000615/memcached/0.log" Oct 11 04:56:07 crc kubenswrapper[4798]: I1011 04:56:07.464062 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk_ad721bd6-8a4d-47c7-b544-4d6c480d6fd2/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:07 crc kubenswrapper[4798]: I1011 04:56:07.839435 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_3498eff7-9fc6-42de-ab90-0df1ab533041/nova-api-log/0.log" Oct 11 04:56:07 crc kubenswrapper[4798]: I1011 04:56:07.855937 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_759110c9-4bac-404b-a088-4c0cd6c63d17/nova-cell0-conductor-conductor/0.log" Oct 11 04:56:07 crc kubenswrapper[4798]: I1011 04:56:07.857480 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_3498eff7-9fc6-42de-ab90-0df1ab533041/nova-api-api/0.log" Oct 11 04:56:08 crc kubenswrapper[4798]: I1011 04:56:08.037430 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_0206c382-c230-4d24-8d8e-7744bc1c1209/nova-cell1-conductor-conductor/0.log" Oct 11 04:56:08 crc kubenswrapper[4798]: I1011 04:56:08.164096 4798 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572_1a79dbc6-3c66-4728-8984-fecebe3eb6f6/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:08 crc kubenswrapper[4798]: I1011 04:56:08.182838 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_ba9b4b7f-46a9-4e6e-9c35-4a114df18a64/nova-cell1-novncproxy-novncproxy/0.log" Oct 11 04:56:08 crc kubenswrapper[4798]: I1011 04:56:08.506071 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_9fa916be-a581-493a-ade0-f5683c1d31e1/nova-metadata-log/0.log" Oct 11 04:56:08 crc kubenswrapper[4798]: I1011 04:56:08.580648 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_88bd4a12-3afa-47ca-8954-e83a63f635d9/nova-scheduler-scheduler/0.log" Oct 11 04:56:08 crc kubenswrapper[4798]: I1011 04:56:08.679197 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_2d1fad00-1405-4149-96d6-7ef60d34c4f1/mysql-bootstrap/0.log" Oct 11 04:56:08 crc kubenswrapper[4798]: I1011 04:56:08.865288 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_2d1fad00-1405-4149-96d6-7ef60d34c4f1/mysql-bootstrap/0.log" Oct 11 04:56:08 crc kubenswrapper[4798]: I1011 04:56:08.906846 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_54e4a0f3-c43f-418e-b105-fd19db3ce615/mysql-bootstrap/0.log" Oct 11 04:56:09 crc kubenswrapper[4798]: I1011 04:56:09.002851 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_2d1fad00-1405-4149-96d6-7ef60d34c4f1/galera/0.log" Oct 11 04:56:09 crc kubenswrapper[4798]: I1011 04:56:09.239499 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_54e4a0f3-c43f-418e-b105-fd19db3ce615/galera/0.log" Oct 11 04:56:09 crc kubenswrapper[4798]: I1011 04:56:09.268635 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_54e4a0f3-c43f-418e-b105-fd19db3ce615/mysql-bootstrap/0.log" Oct 11 04:56:09 crc kubenswrapper[4798]: I1011 04:56:09.272488 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_0ecd7207-e032-4433-9077-b023a4c891f3/openstackclient/0.log" Oct 11 04:56:09 crc kubenswrapper[4798]: I1011 04:56:09.449638 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_9fa916be-a581-493a-ade0-f5683c1d31e1/nova-metadata-metadata/0.log" Oct 11 04:56:09 crc kubenswrapper[4798]: I1011 04:56:09.535632 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-m5tjs_18b39a28-1973-440d-8294-c9aa434cd56d/openstack-network-exporter/0.log" Oct 11 04:56:09 crc kubenswrapper[4798]: I1011 04:56:09.621676 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tk4gr_7f9da136-ab4f-4a2e-99e4-e546aa4220f9/ovsdb-server-init/0.log" Oct 11 04:56:09 crc kubenswrapper[4798]: I1011 04:56:09.814581 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tk4gr_7f9da136-ab4f-4a2e-99e4-e546aa4220f9/ovs-vswitchd/0.log" Oct 11 04:56:09 crc kubenswrapper[4798]: I1011 04:56:09.838795 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tk4gr_7f9da136-ab4f-4a2e-99e4-e546aa4220f9/ovsdb-server-init/0.log" Oct 11 04:56:09 crc kubenswrapper[4798]: I1011 
04:56:09.849984 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tk4gr_7f9da136-ab4f-4a2e-99e4-e546aa4220f9/ovsdb-server/0.log" Oct 11 04:56:09 crc kubenswrapper[4798]: I1011 04:56:09.939739 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-sv678_3e260c01-682b-4b79-9ebe-c06c29750bfe/ovn-controller/0.log" Oct 11 04:56:10 crc kubenswrapper[4798]: I1011 04:56:10.093188 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-vmqz7_08139862-0611-43b8-93e1-c5d9cd73e4d5/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:10 crc kubenswrapper[4798]: I1011 04:56:10.139186 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01/ovn-northd/0.log" Oct 11 04:56:10 crc kubenswrapper[4798]: I1011 04:56:10.180539 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01/openstack-network-exporter/0.log" Oct 11 04:56:10 crc kubenswrapper[4798]: I1011 04:56:10.319416 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5fcb3fcb-1839-493f-b485-0bb9ea061bc2/openstack-network-exporter/0.log" Oct 11 04:56:10 crc kubenswrapper[4798]: I1011 04:56:10.401960 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5fcb3fcb-1839-493f-b485-0bb9ea061bc2/ovsdbserver-nb/0.log" Oct 11 04:56:10 crc kubenswrapper[4798]: I1011 04:56:10.434371 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_76d0b2fe-e594-4a66-8358-d3aed19300ea/openstack-network-exporter/0.log" Oct 11 04:56:10 crc kubenswrapper[4798]: I1011 04:56:10.556061 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_76d0b2fe-e594-4a66-8358-d3aed19300ea/ovsdbserver-sb/0.log" Oct 11 04:56:10 crc kubenswrapper[4798]: I1011 04:56:10.651285 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-c67f5f784-ph78z_0843bdb2-fc67-4c61-991e-383ebdb67136/placement-api/0.log" Oct 11 04:56:10 crc kubenswrapper[4798]: I1011 04:56:10.717091 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-c67f5f784-ph78z_0843bdb2-fc67-4c61-991e-383ebdb67136/placement-log/0.log" Oct 11 04:56:10 crc kubenswrapper[4798]: I1011 04:56:10.793058 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_cdfee9bc-39ae-48a4-9645-7191a2ae2cd5/setup-container/0.log" Oct 11 04:56:11 crc kubenswrapper[4798]: I1011 04:56:11.025805 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_cdfee9bc-39ae-48a4-9645-7191a2ae2cd5/setup-container/0.log" Oct 11 04:56:11 crc kubenswrapper[4798]: I1011 04:56:11.036344 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_cdfee9bc-39ae-48a4-9645-7191a2ae2cd5/rabbitmq/0.log" Oct 11 04:56:11 crc kubenswrapper[4798]: I1011 04:56:11.072112 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe/setup-container/0.log" Oct 11 04:56:11 crc kubenswrapper[4798]: I1011 04:56:11.274829 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe/setup-container/0.log" Oct 11 04:56:11 crc kubenswrapper[4798]: I1011 04:56:11.281720 4798 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe/rabbitmq/0.log" Oct 11 04:56:11 crc kubenswrapper[4798]: I1011 04:56:11.315048 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4_c7e25630-2ac6-435f-bba0-281aaee8a48d/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:11 crc kubenswrapper[4798]: I1011 04:56:11.574500 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-s9wvs_230b772a-a9d9-4318-ada4-a83901d636b5/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:11 crc kubenswrapper[4798]: I1011 04:56:11.628353 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr_471b7b25-2875-473e-bb6e-0509b527b7d3/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:11 crc kubenswrapper[4798]: I1011 04:56:11.681682 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-rznrc_e75ed635-7172-447f-ab1b-b9af46d15be1/ssh-known-hosts-edpm-deployment/0.log" Oct 11 04:56:11 crc kubenswrapper[4798]: I1011 04:56:11.851783 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_b3b2c8d8-3245-48a3-be3a-099046cf7258/tempest-tests-tempest-tests-runner/0.log" Oct 11 04:56:11 crc kubenswrapper[4798]: I1011 04:56:11.897677 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_b522303e-6696-4b73-87a2-127b91d444af/test-operator-logs-container/0.log" Oct 11 04:56:12 crc kubenswrapper[4798]: I1011 04:56:12.125939 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-bb949_57b977e1-ad34-4a35-a31e-fe6e1a0b32ee/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 04:56:18 crc kubenswrapper[4798]: I1011 04:56:18.423749 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d" Oct 11 04:56:18 crc kubenswrapper[4798]: E1011 04:56:18.424599 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:56:30 crc kubenswrapper[4798]: I1011 04:56:30.424129 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d" Oct 11 04:56:30 crc kubenswrapper[4798]: E1011 04:56:30.425575 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 04:56:34 crc kubenswrapper[4798]: I1011 04:56:34.045937 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-b4ct7"] Oct 11 04:56:34 crc 
kubenswrapper[4798]: I1011 04:56:34.057577 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-b4ct7"] Oct 11 04:56:35 crc kubenswrapper[4798]: I1011 04:56:35.438877 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b663620b-a5c4-40ff-a611-749eecb67c0a" path="/var/lib/kubelet/pods/b663620b-a5c4-40ff-a611-749eecb67c0a/volumes" Oct 11 04:56:35 crc kubenswrapper[4798]: I1011 04:56:35.629759 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/util/0.log" Oct 11 04:56:35 crc kubenswrapper[4798]: I1011 04:56:35.872198 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/util/0.log" Oct 11 04:56:35 crc kubenswrapper[4798]: I1011 04:56:35.891312 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/pull/0.log" Oct 11 04:56:35 crc kubenswrapper[4798]: I1011 04:56:35.891379 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/pull/0.log" Oct 11 04:56:36 crc kubenswrapper[4798]: I1011 04:56:36.075544 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/util/0.log" Oct 11 04:56:36 crc kubenswrapper[4798]: I1011 04:56:36.094595 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/pull/0.log" Oct 11 04:56:36 crc kubenswrapper[4798]: I1011 04:56:36.129995 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/extract/0.log" Oct 11 04:56:36 crc kubenswrapper[4798]: I1011 04:56:36.251638 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-64f84fcdbb-pvqqn_7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7/kube-rbac-proxy/0.log" Oct 11 04:56:36 crc kubenswrapper[4798]: I1011 04:56:36.356384 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-59cdc64769-9cgvk_755c871f-8030-429f-9b9f-5b1a6c1e24e0/kube-rbac-proxy/0.log" Oct 11 04:56:36 crc kubenswrapper[4798]: I1011 04:56:36.373310 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-64f84fcdbb-pvqqn_7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7/manager/0.log" Oct 11 04:56:36 crc kubenswrapper[4798]: I1011 04:56:36.567811 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-59cdc64769-9cgvk_755c871f-8030-429f-9b9f-5b1a6c1e24e0/manager/0.log" Oct 11 04:56:36 crc kubenswrapper[4798]: I1011 04:56:36.574781 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-687df44cdb-x5n77_2c18a2f7-9a42-495c-bc9c-86750a381f5c/kube-rbac-proxy/0.log" Oct 11 04:56:36 crc kubenswrapper[4798]: I1011 
04:56:36.593216 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-687df44cdb-x5n77_2c18a2f7-9a42-495c-bc9c-86750a381f5c/manager/0.log"
Oct 11 04:56:36 crc kubenswrapper[4798]: I1011 04:56:36.812049 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7f5fc6b5ff-4zk9f_4490c7b3-a500-4224-8597-abd71de4fa13/kube-rbac-proxy/0.log"
Oct 11 04:56:36 crc kubenswrapper[4798]: I1011 04:56:36.904986 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7f5fc6b5ff-4zk9f_4490c7b3-a500-4224-8597-abd71de4fa13/manager/0.log"
Oct 11 04:56:37 crc kubenswrapper[4798]: I1011 04:56:37.009402 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-6d9967f8dd-lgszb_cc1b32c4-acec-47f4-95ce-88763c33ca81/kube-rbac-proxy/0.log"
Oct 11 04:56:37 crc kubenswrapper[4798]: I1011 04:56:37.038900 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-6d9967f8dd-lgszb_cc1b32c4-acec-47f4-95ce-88763c33ca81/manager/0.log"
Oct 11 04:56:37 crc kubenswrapper[4798]: I1011 04:56:37.112022 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-6d74794d9b-vpg7r_332f7594-84b2-4761-a067-a32c31469e4f/kube-rbac-proxy/0.log"
Oct 11 04:56:37 crc kubenswrapper[4798]: I1011 04:56:37.233601 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-6d74794d9b-vpg7r_332f7594-84b2-4761-a067-a32c31469e4f/manager/0.log"
Oct 11 04:56:37 crc kubenswrapper[4798]: I1011 04:56:37.316960 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-585fc5b659-zsdqq_01834c42-7ee0-4576-9136-5812fc37e1cc/kube-rbac-proxy/0.log"
Oct 11 04:56:37 crc kubenswrapper[4798]: I1011 04:56:37.521763 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-585fc5b659-zsdqq_01834c42-7ee0-4576-9136-5812fc37e1cc/manager/0.log"
Oct 11 04:56:37 crc kubenswrapper[4798]: I1011 04:56:37.528674 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-74cb5cbc49-p96zk_a168d198-c58b-4674-8286-72fb306036b2/kube-rbac-proxy/0.log"
Oct 11 04:56:37 crc kubenswrapper[4798]: I1011 04:56:37.592781 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-74cb5cbc49-p96zk_a168d198-c58b-4674-8286-72fb306036b2/manager/0.log"
Oct 11 04:56:37 crc kubenswrapper[4798]: I1011 04:56:37.748269 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-ddb98f99b-npmgn_2fe367a2-f6f5-47ea-97a5-75a8b79778fb/kube-rbac-proxy/0.log"
Oct 11 04:56:37 crc kubenswrapper[4798]: I1011 04:56:37.910809 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-ddb98f99b-npmgn_2fe367a2-f6f5-47ea-97a5-75a8b79778fb/manager/0.log"
Oct 11 04:56:37 crc kubenswrapper[4798]: I1011 04:56:37.967435 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-59578bc799-7kfp8_2448a4a6-cf4c-45ec-89cc-25621f444988/kube-rbac-proxy/0.log"
Oct 11 04:56:38 crc kubenswrapper[4798]: I1011 04:56:38.073705 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-59578bc799-7kfp8_2448a4a6-cf4c-45ec-89cc-25621f444988/manager/0.log"
Oct 11 04:56:38 crc kubenswrapper[4798]: I1011 04:56:38.155979 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-5777b4f897-l86k6_e9fb9720-fc76-47a8-b111-63b5bc2899da/kube-rbac-proxy/0.log"
Oct 11 04:56:38 crc kubenswrapper[4798]: I1011 04:56:38.224958 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-5777b4f897-l86k6_e9fb9720-fc76-47a8-b111-63b5bc2899da/manager/0.log"
Oct 11 04:56:38 crc kubenswrapper[4798]: I1011 04:56:38.387009 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-797d478b46-rwhzb_48b2c137-9726-4e99-b0ca-a3cdb3bc6a1b/kube-rbac-proxy/0.log"
Oct 11 04:56:38 crc kubenswrapper[4798]: I1011 04:56:38.425797 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-797d478b46-rwhzb_48b2c137-9726-4e99-b0ca-a3cdb3bc6a1b/manager/0.log"
Oct 11 04:56:38 crc kubenswrapper[4798]: I1011 04:56:38.539464 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-57bb74c7bf-6rfxm_7f1693fd-2ec7-4047-a2df-ffa5d7c94e67/kube-rbac-proxy/0.log"
Oct 11 04:56:38 crc kubenswrapper[4798]: I1011 04:56:38.698264 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-57bb74c7bf-6rfxm_7f1693fd-2ec7-4047-a2df-ffa5d7c94e67/manager/0.log"
Oct 11 04:56:38 crc kubenswrapper[4798]: I1011 04:56:38.732626 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-6d7c7ddf95-l9z67_13f1e30f-4d2b-4517-86ae-8dc8334b0841/kube-rbac-proxy/0.log"
Oct 11 04:56:38 crc kubenswrapper[4798]: I1011 04:56:38.765775 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-6d7c7ddf95-l9z67_13f1e30f-4d2b-4517-86ae-8dc8334b0841/manager/0.log"
Oct 11 04:56:38 crc kubenswrapper[4798]: I1011 04:56:38.907116 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4_60303330-3699-43be-ba28-96cd788e1cf0/kube-rbac-proxy/0.log"
Oct 11 04:56:38 crc kubenswrapper[4798]: I1011 04:56:38.951327 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4_60303330-3699-43be-ba28-96cd788e1cf0/manager/0.log"
Oct 11 04:56:39 crc kubenswrapper[4798]: I1011 04:56:39.029492 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7d8c4595d6-d6fzt_a0f0b4cf-9851-4d84-81e0-7130b9777793/kube-rbac-proxy/0.log"
Oct 11 04:56:39 crc kubenswrapper[4798]: I1011 04:56:39.197380 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-69766b5fb5-qk5q4_6e988a50-121d-47e0-ad52-8a1139fc8ad6/kube-rbac-proxy/0.log"
Oct 11 04:56:39 crc kubenswrapper[4798]: I1011 04:56:39.394454 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-69766b5fb5-qk5q4_6e988a50-121d-47e0-ad52-8a1139fc8ad6/operator/0.log"
Oct 11 04:56:39 crc kubenswrapper[4798]: I1011 04:56:39.446852 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-nkfzg_18c8d96d-3c6b-480f-bea7-1bfbc2d871e3/registry-server/0.log"
Oct 11 04:56:39 crc kubenswrapper[4798]: I1011 04:56:39.532262 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-869cc7797f-t29x4_9b7c0cf6-604f-43aa-ad27-0a2f54507751/kube-rbac-proxy/0.log"
Oct 11 04:56:39 crc kubenswrapper[4798]: I1011 04:56:39.686887 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-869cc7797f-t29x4_9b7c0cf6-604f-43aa-ad27-0a2f54507751/manager/0.log"
Oct 11 04:56:39 crc kubenswrapper[4798]: I1011 04:56:39.793438 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-664664cb68-jkj5p_d7c1b935-ea3d-4d7b-96ab-5eb98fbaaea8/kube-rbac-proxy/0.log"
Oct 11 04:56:39 crc kubenswrapper[4798]: I1011 04:56:39.857576 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-664664cb68-jkj5p_d7c1b935-ea3d-4d7b-96ab-5eb98fbaaea8/manager/0.log"
Oct 11 04:56:40 crc kubenswrapper[4798]: I1011 04:56:40.053770 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4_a368474b-c03b-43b2-96af-0bb78d9f6ee6/operator/0.log"
Oct 11 04:56:40 crc kubenswrapper[4798]: I1011 04:56:40.106950 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f4d5dfdc6-fffgl_f0952c0e-9c58-446c-90eb-61c0f8f4d64a/kube-rbac-proxy/0.log"
Oct 11 04:56:40 crc kubenswrapper[4798]: I1011 04:56:40.227227 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f4d5dfdc6-fffgl_f0952c0e-9c58-446c-90eb-61c0f8f4d64a/manager/0.log"
Oct 11 04:56:40 crc kubenswrapper[4798]: I1011 04:56:40.357286 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-578874c84d-zg2f8_0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94/kube-rbac-proxy/0.log"
Oct 11 04:56:40 crc kubenswrapper[4798]: I1011 04:56:40.476905 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-578874c84d-zg2f8_0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94/manager/0.log"
Oct 11 04:56:40 crc kubenswrapper[4798]: I1011 04:56:40.487462 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-ffcdd6c94-4t4ft_2c332094-6f91-4065-829a-736e42bd6560/kube-rbac-proxy/0.log"
Oct 11 04:56:40 crc kubenswrapper[4798]: I1011 04:56:40.565646 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7d8c4595d6-d6fzt_a0f0b4cf-9851-4d84-81e0-7130b9777793/manager/0.log"
Oct 11 04:56:40 crc kubenswrapper[4798]: I1011 04:56:40.616497 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-ffcdd6c94-4t4ft_2c332094-6f91-4065-829a-736e42bd6560/manager/0.log"
Oct 11 04:56:40 crc kubenswrapper[4798]: I1011 04:56:40.712959 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-646675d848-68hdd_eb92bfae-ca3f-4e70-9fa1-440a6154cb53/kube-rbac-proxy/0.log"
Oct 11 04:56:40 crc kubenswrapper[4798]: I1011 04:56:40.755687 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-646675d848-68hdd_eb92bfae-ca3f-4e70-9fa1-440a6154cb53/manager/0.log"
Oct 11 04:56:41 crc kubenswrapper[4798]: I1011 04:56:41.424766 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:56:41 crc kubenswrapper[4798]: E1011 04:56:41.425081 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:56:49 crc kubenswrapper[4798]: I1011 04:56:49.041582 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-dd57-account-create-bdglj"]
Oct 11 04:56:49 crc kubenswrapper[4798]: I1011 04:56:49.059727 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-dd57-account-create-bdglj"]
Oct 11 04:56:49 crc kubenswrapper[4798]: I1011 04:56:49.444053 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96e78519-e8bc-4849-894c-781a43a4bbf7" path="/var/lib/kubelet/pods/96e78519-e8bc-4849-894c-781a43a4bbf7/volumes"
Oct 11 04:56:53 crc kubenswrapper[4798]: I1011 04:56:53.424413 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:56:53 crc kubenswrapper[4798]: E1011 04:56:53.425273 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:56:58 crc kubenswrapper[4798]: I1011 04:56:58.095660 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-jspml_f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88/control-plane-machine-set-operator/0.log"
Oct 11 04:56:58 crc kubenswrapper[4798]: I1011 04:56:58.196434 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wqjfc_29c4d9ea-078d-4ed3-a56a-c0a29887b6a1/kube-rbac-proxy/0.log"
Oct 11 04:56:58 crc kubenswrapper[4798]: I1011 04:56:58.272910 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wqjfc_29c4d9ea-078d-4ed3-a56a-c0a29887b6a1/machine-api-operator/0.log"
Oct 11 04:57:08 crc kubenswrapper[4798]: I1011 04:57:08.424288 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:57:08 crc kubenswrapper[4798]: E1011 04:57:08.425756 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:57:11 crc kubenswrapper[4798]: I1011 04:57:11.044802 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-2qjln"]
Oct 11 04:57:11 crc kubenswrapper[4798]: I1011 04:57:11.061609 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-2qjln"]
Oct 11 04:57:11 crc kubenswrapper[4798]: I1011 04:57:11.252583 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-b7hmq_25dd9371-dab0-4e09-b2a3-da8a9dfe7135/cert-manager-controller/0.log"
Oct 11 04:57:11 crc kubenswrapper[4798]: I1011 04:57:11.394448 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-fgz9c_5e28bfca-a266-4225-8f9f-4d7e71691047/cert-manager-cainjector/0.log"
Oct 11 04:57:11 crc kubenswrapper[4798]: I1011 04:57:11.435210 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2" path="/var/lib/kubelet/pods/24db8f3f-0f6f-4c88-be58-7de7bdd3d5d2/volumes"
Oct 11 04:57:11 crc kubenswrapper[4798]: I1011 04:57:11.459780 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-kkd7l_0564940f-8ec6-4a8f-8f1e-cda48ac953c2/cert-manager-webhook/0.log"
Oct 11 04:57:23 crc kubenswrapper[4798]: I1011 04:57:23.425080 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:57:23 crc kubenswrapper[4798]: E1011 04:57:23.425980 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:57:24 crc kubenswrapper[4798]: I1011 04:57:24.028547 4798 scope.go:117] "RemoveContainer" containerID="bd8458670fb406e95377e711b47a821d6b9334460b5e2734a02469762fdb5879"
Oct 11 04:57:24 crc kubenswrapper[4798]: I1011 04:57:24.056932 4798 scope.go:117] "RemoveContainer" containerID="d4f65dac1a1f9ebcda3786e32adbed8ba4fd6ee01b2ca4d9648ed054fbab9517"
Oct 11 04:57:24 crc kubenswrapper[4798]: I1011 04:57:24.112618 4798 scope.go:117] "RemoveContainer" containerID="36de3b204f3c58d661bd6f6b68e8eaccd0c1efeecddab25a6706476386e43002"
Oct 11 04:57:24 crc kubenswrapper[4798]: I1011 04:57:24.219336 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6b874cbd85-dlwsb_4db43a23-4466-4600-8b86-a39c6bd23319/nmstate-console-plugin/0.log"
Oct 11 04:57:24 crc kubenswrapper[4798]: I1011 04:57:24.467425 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-gflt9_ac8be373-2f59-42fb-afb2-b4449dee5657/nmstate-handler/0.log"
Oct 11 04:57:24 crc kubenswrapper[4798]: I1011 04:57:24.516196 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-fdff9cb8d-9j84j_4c1789de-d93e-434d-bbd3-d1603457248b/kube-rbac-proxy/0.log"
Oct 11 04:57:24 crc kubenswrapper[4798]: I1011 04:57:24.544442 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-fdff9cb8d-9j84j_4c1789de-d93e-434d-bbd3-d1603457248b/nmstate-metrics/0.log"
Oct 11 04:57:24 crc kubenswrapper[4798]: I1011 04:57:24.643302 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-858ddd8f98-hgtkp_211ee79e-e933-462d-9bf0-a287315deab3/nmstate-operator/0.log"
Oct 11 04:57:24 crc kubenswrapper[4798]: I1011 04:57:24.757943 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6cdbc54649-ptpf8_93ae23c3-aeb5-4d02-b48d-f1741886e18c/nmstate-webhook/0.log"
Oct 11 04:57:34 crc kubenswrapper[4798]: I1011 04:57:34.423725 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:57:34 crc kubenswrapper[4798]: E1011 04:57:34.424679 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:57:41 crc kubenswrapper[4798]: I1011 04:57:41.964872 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-68d546b9d8-qt94z_5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e/kube-rbac-proxy/0.log"
Oct 11 04:57:42 crc kubenswrapper[4798]: I1011 04:57:42.096140 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-68d546b9d8-qt94z_5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e/controller/0.log"
Oct 11 04:57:42 crc kubenswrapper[4798]: I1011 04:57:42.203520 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-frr-files/0.log"
Oct 11 04:57:42 crc kubenswrapper[4798]: I1011 04:57:42.447895 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-reloader/0.log"
Oct 11 04:57:42 crc kubenswrapper[4798]: I1011 04:57:42.448575 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-metrics/0.log"
Oct 11 04:57:42 crc kubenswrapper[4798]: I1011 04:57:42.458308 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-frr-files/0.log"
Oct 11 04:57:42 crc kubenswrapper[4798]: I1011 04:57:42.486135 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-reloader/0.log"
Oct 11 04:57:42 crc kubenswrapper[4798]: I1011 04:57:42.702651 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-reloader/0.log"
Oct 11 04:57:42 crc kubenswrapper[4798]: I1011 04:57:42.704917 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-metrics/0.log"
Oct 11 04:57:42 crc kubenswrapper[4798]: I1011 04:57:42.735476 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-frr-files/0.log"
Oct 11 04:57:42 crc kubenswrapper[4798]: I1011 04:57:42.763142 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-metrics/0.log"
Oct 11 04:57:42 crc kubenswrapper[4798]: I1011 04:57:42.950159 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-frr-files/0.log"
Oct 11 04:57:42 crc kubenswrapper[4798]: I1011 04:57:42.954380 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-metrics/0.log"
Oct 11 04:57:42 crc kubenswrapper[4798]: I1011 04:57:42.978412 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-reloader/0.log"
Oct 11 04:57:43 crc kubenswrapper[4798]: I1011 04:57:43.028325 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/controller/0.log"
Oct 11 04:57:43 crc kubenswrapper[4798]: I1011 04:57:43.225883 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/frr-metrics/0.log"
Oct 11 04:57:43 crc kubenswrapper[4798]: I1011 04:57:43.234049 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/kube-rbac-proxy/0.log"
Oct 11 04:57:43 crc kubenswrapper[4798]: I1011 04:57:43.258551 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/kube-rbac-proxy-frr/0.log"
Oct 11 04:57:43 crc kubenswrapper[4798]: I1011 04:57:43.494669 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/reloader/0.log"
Oct 11 04:57:43 crc kubenswrapper[4798]: I1011 04:57:43.552774 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-64bf5d555-2mrbj_71cb0bdd-c787-4000-b0fd-f7a0e84fa145/frr-k8s-webhook-server/0.log"
Oct 11 04:57:43 crc kubenswrapper[4798]: I1011 04:57:43.809008 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7458f849d5-nbss8_d1544005-518c-4296-b846-c5b9ac3af4c0/manager/0.log"
Oct 11 04:57:43 crc kubenswrapper[4798]: I1011 04:57:43.971177 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5786d94797-8n6wg_dbb4b409-1678-4ae6-b584-9442c133945f/webhook-server/0.log"
Oct 11 04:57:44 crc kubenswrapper[4798]: I1011 04:57:44.146334 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zdkcf_8a76e293-0882-4754-b825-688511b6c234/kube-rbac-proxy/0.log"
Oct 11 04:57:44 crc kubenswrapper[4798]: I1011 04:57:44.764991 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/frr/0.log"
Oct 11 04:57:44 crc kubenswrapper[4798]: I1011 04:57:44.780656 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zdkcf_8a76e293-0882-4754-b825-688511b6c234/speaker/0.log"
Oct 11 04:57:48 crc kubenswrapper[4798]: I1011 04:57:48.423973 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:57:48 crc kubenswrapper[4798]: E1011 04:57:48.426565 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:57:58 crc kubenswrapper[4798]: I1011 04:57:58.660045 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/util/0.log"
Oct 11 04:57:58 crc kubenswrapper[4798]: I1011 04:57:58.924831 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/pull/0.log"
Oct 11 04:57:58 crc kubenswrapper[4798]: I1011 04:57:58.963507 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/pull/0.log"
Oct 11 04:57:58 crc kubenswrapper[4798]: I1011 04:57:58.990704 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/util/0.log"
Oct 11 04:57:59 crc kubenswrapper[4798]: I1011 04:57:59.111431 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/util/0.log"
Oct 11 04:57:59 crc kubenswrapper[4798]: I1011 04:57:59.142323 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/pull/0.log"
Oct 11 04:57:59 crc kubenswrapper[4798]: I1011 04:57:59.191866 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/extract/0.log"
Oct 11 04:57:59 crc kubenswrapper[4798]: I1011 04:57:59.299702 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/extract-utilities/0.log"
Oct 11 04:57:59 crc kubenswrapper[4798]: I1011 04:57:59.495888 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/extract-content/0.log"
Oct 11 04:57:59 crc kubenswrapper[4798]: I1011 04:57:59.498145 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/extract-content/0.log"
Oct 11 04:57:59 crc kubenswrapper[4798]: I1011 04:57:59.507134 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/extract-utilities/0.log"
Oct 11 04:58:00 crc kubenswrapper[4798]: I1011 04:58:00.336906 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/extract-utilities/0.log"
Oct 11 04:58:00 crc kubenswrapper[4798]: I1011 04:58:00.408429 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/extract-content/0.log"
Oct 11 04:58:00 crc kubenswrapper[4798]: I1011 04:58:00.624766 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/extract-utilities/0.log"
Oct 11 04:58:00 crc kubenswrapper[4798]: I1011 04:58:00.854087 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/extract-utilities/0.log"
Oct 11 04:58:00 crc kubenswrapper[4798]: I1011 04:58:00.902160 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/extract-content/0.log"
Oct 11 04:58:00 crc kubenswrapper[4798]: I1011 04:58:00.907100 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/registry-server/0.log"
Oct 11 04:58:00 crc kubenswrapper[4798]: I1011 04:58:00.924010 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/extract-content/0.log"
Oct 11 04:58:01 crc kubenswrapper[4798]: I1011 04:58:01.169992 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/extract-content/0.log"
Oct 11 04:58:01 crc kubenswrapper[4798]: I1011 04:58:01.199402 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/extract-utilities/0.log"
Oct 11 04:58:01 crc kubenswrapper[4798]: I1011 04:58:01.424465 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:58:01 crc kubenswrapper[4798]: E1011 04:58:01.424835 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:58:01 crc kubenswrapper[4798]: I1011 04:58:01.481583 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/util/0.log"
Oct 11 04:58:01 crc kubenswrapper[4798]: I1011 04:58:01.687362 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/pull/0.log"
Oct 11 04:58:01 crc kubenswrapper[4798]: I1011 04:58:01.743617 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/registry-server/0.log"
Oct 11 04:58:01 crc kubenswrapper[4798]: I1011 04:58:01.791965 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/util/0.log"
Oct 11 04:58:01 crc kubenswrapper[4798]: I1011 04:58:01.796928 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/pull/0.log"
Oct 11 04:58:01 crc kubenswrapper[4798]: I1011 04:58:01.915937 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/util/0.log"
Oct 11 04:58:01 crc kubenswrapper[4798]: I1011 04:58:01.926433 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/pull/0.log"
Oct 11 04:58:01 crc kubenswrapper[4798]: I1011 04:58:01.992420 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/extract/0.log"
Oct 11 04:58:02 crc kubenswrapper[4798]: I1011 04:58:02.022105 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-rkg2t_0d156e8f-2a95-406a-beb0-c9b7e36f9e8b/marketplace-operator/0.log"
Oct 11 04:58:02 crc kubenswrapper[4798]: I1011 04:58:02.185590 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/extract-utilities/0.log"
Oct 11 04:58:02 crc kubenswrapper[4798]: I1011 04:58:02.438670 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/extract-content/0.log"
Oct 11 04:58:02 crc kubenswrapper[4798]: I1011 04:58:02.445736 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/extract-content/0.log"
Oct 11 04:58:02 crc kubenswrapper[4798]: I1011 04:58:02.451895 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/extract-utilities/0.log"
Oct 11 04:58:02 crc kubenswrapper[4798]: I1011 04:58:02.628623 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/extract-utilities/0.log"
Oct 11 04:58:02 crc kubenswrapper[4798]: I1011 04:58:02.682651 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/extract-utilities/0.log"
Oct 11 04:58:02 crc kubenswrapper[4798]: I1011 04:58:02.714042 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/extract-content/0.log"
Oct 11 04:58:02 crc kubenswrapper[4798]: I1011 04:58:02.836001 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/registry-server/0.log"
Oct 11 04:58:02 crc kubenswrapper[4798]: I1011 04:58:02.930490 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/extract-content/0.log"
Oct 11 04:58:02 crc kubenswrapper[4798]: I1011 04:58:02.956544 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/extract-utilities/0.log"
Oct 11 04:58:02 crc kubenswrapper[4798]: I1011 04:58:02.988420 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/extract-content/0.log"
Oct 11 04:58:03 crc kubenswrapper[4798]: I1011 04:58:03.141507 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/extract-content/0.log"
Oct 11 04:58:03 crc kubenswrapper[4798]: I1011 04:58:03.164288 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/extract-utilities/0.log"
Oct 11 04:58:03 crc kubenswrapper[4798]: I1011 04:58:03.676001 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/registry-server/0.log"
Oct 11 04:58:13 crc kubenswrapper[4798]: I1011 04:58:13.424779 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:58:13 crc kubenswrapper[4798]: E1011 04:58:13.426036 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:58:27 crc kubenswrapper[4798]: I1011 04:58:27.464871 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:58:27 crc kubenswrapper[4798]: E1011 04:58:27.466022 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:58:42 crc kubenswrapper[4798]: I1011 04:58:42.424892 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:58:42 crc kubenswrapper[4798]: E1011 04:58:42.425830 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:58:56 crc kubenswrapper[4798]: I1011 04:58:56.423656 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:58:56 crc kubenswrapper[4798]: E1011 04:58:56.424758 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0"
Oct 11 04:59:11 crc kubenswrapper[4798]: I1011 04:59:11.424346 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 04:59:12 crc kubenswrapper[4798]: I1011 04:59:12.584625 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"1547835e5b38aaac070151fdc3778d195594e548ec1f9e8578c50470ab453c91"}
Oct 11 04:59:53 crc kubenswrapper[4798]: I1011 04:59:53.137301 4798 generic.go:334] "Generic (PLEG): container finished" podID="2d61516a-feb6-4497-8e4c-4c91cb6618e9" containerID="6837eaa8e60b27d5c42d5c6d75533620642989586fd991e5ec749c6be19ba4a5" exitCode=0
Oct 11 04:59:53 crc kubenswrapper[4798]: I1011 04:59:53.137433 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rjbn/must-gather-wwgth" event={"ID":"2d61516a-feb6-4497-8e4c-4c91cb6618e9","Type":"ContainerDied","Data":"6837eaa8e60b27d5c42d5c6d75533620642989586fd991e5ec749c6be19ba4a5"}
Oct 11 04:59:53 crc kubenswrapper[4798]: I1011 04:59:53.140252 4798 scope.go:117] "RemoveContainer" containerID="6837eaa8e60b27d5c42d5c6d75533620642989586fd991e5ec749c6be19ba4a5"
Oct 11 04:59:53 crc kubenswrapper[4798]: I1011 04:59:53.699468 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6rjbn_must-gather-wwgth_2d61516a-feb6-4497-8e4c-4c91cb6618e9/gather/0.log"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.157356 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"]
Oct 11 05:00:00 crc kubenswrapper[4798]: E1011 05:00:00.158630 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f7f106c-2d8f-4871-9618-605d9d5090e8" containerName="container-00"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.158649 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f7f106c-2d8f-4871-9618-605d9d5090e8" containerName="container-00"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.158904 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f7f106c-2d8f-4871-9618-605d9d5090e8" containerName="container-00"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.159963 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.165700 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.167115 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.182199 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"]
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.253296 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26ec8154-ecfd-4c6a-9c54-510d6a052937-config-volume\") pod \"collect-profiles-29335980-q7lbs\" (UID: \"26ec8154-ecfd-4c6a-9c54-510d6a052937\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.253421 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8dpw\" (UniqueName: \"kubernetes.io/projected/26ec8154-ecfd-4c6a-9c54-510d6a052937-kube-api-access-h8dpw\") pod \"collect-profiles-29335980-q7lbs\" (UID: \"26ec8154-ecfd-4c6a-9c54-510d6a052937\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.253489 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26ec8154-ecfd-4c6a-9c54-510d6a052937-secret-volume\") pod \"collect-profiles-29335980-q7lbs\" (UID: \"26ec8154-ecfd-4c6a-9c54-510d6a052937\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.355821 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26ec8154-ecfd-4c6a-9c54-510d6a052937-config-volume\") pod \"collect-profiles-29335980-q7lbs\" (UID: \"26ec8154-ecfd-4c6a-9c54-510d6a052937\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.355942 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8dpw\" (UniqueName: \"kubernetes.io/projected/26ec8154-ecfd-4c6a-9c54-510d6a052937-kube-api-access-h8dpw\") pod \"collect-profiles-29335980-q7lbs\" (UID: \"26ec8154-ecfd-4c6a-9c54-510d6a052937\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.355993 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26ec8154-ecfd-4c6a-9c54-510d6a052937-secret-volume\") pod \"collect-profiles-29335980-q7lbs\" (UID: \"26ec8154-ecfd-4c6a-9c54-510d6a052937\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.357019 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26ec8154-ecfd-4c6a-9c54-510d6a052937-config-volume\") pod \"collect-profiles-29335980-q7lbs\" (UID: \"26ec8154-ecfd-4c6a-9c54-510d6a052937\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.371081 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26ec8154-ecfd-4c6a-9c54-510d6a052937-secret-volume\") pod \"collect-profiles-29335980-q7lbs\" (UID: \"26ec8154-ecfd-4c6a-9c54-510d6a052937\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.374195 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8dpw\" (UniqueName: \"kubernetes.io/projected/26ec8154-ecfd-4c6a-9c54-510d6a052937-kube-api-access-h8dpw\") pod \"collect-profiles-29335980-q7lbs\" (UID: \"26ec8154-ecfd-4c6a-9c54-510d6a052937\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"
Oct 11 05:00:00 crc kubenswrapper[4798]: I1011 05:00:00.497085 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"
Oct 11 05:00:01 crc kubenswrapper[4798]: I1011 05:00:01.108552 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"]
Oct 11 05:00:01 crc kubenswrapper[4798]: W1011 05:00:01.113452 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26ec8154_ecfd_4c6a_9c54_510d6a052937.slice/crio-8c388cd18d7b1c0c3b9ec1a799f01dc96317e4cdcbd1a8c1368b51943b5d639d WatchSource:0}: Error finding container 8c388cd18d7b1c0c3b9ec1a799f01dc96317e4cdcbd1a8c1368b51943b5d639d: Status 404 returned error can't find the container with id 8c388cd18d7b1c0c3b9ec1a799f01dc96317e4cdcbd1a8c1368b51943b5d639d
Oct 11 05:00:01 crc kubenswrapper[4798]: I1011 05:00:01.238718 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs" event={"ID":"26ec8154-ecfd-4c6a-9c54-510d6a052937","Type":"ContainerStarted","Data":"8c388cd18d7b1c0c3b9ec1a799f01dc96317e4cdcbd1a8c1368b51943b5d639d"}
Oct 11 05:00:01 crc kubenswrapper[4798]: I1011 05:00:01.820590 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6rjbn/must-gather-wwgth"]
Oct 11 05:00:01 crc kubenswrapper[4798]: I1011 05:00:01.821268 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-6rjbn/must-gather-wwgth" podUID="2d61516a-feb6-4497-8e4c-4c91cb6618e9" containerName="copy" containerID="cri-o://2d5e3bbc4db91851838d6c3e19a820a15212baf498672037b67d05ce2575dd99" gracePeriod=2
Oct 11 05:00:01 crc kubenswrapper[4798]: I1011 05:00:01.829199 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6rjbn/must-gather-wwgth"]
Oct 11 05:00:02 crc kubenswrapper[4798]: I1011 05:00:02.253729 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6rjbn_must-gather-wwgth_2d61516a-feb6-4497-8e4c-4c91cb6618e9/copy/0.log"
Oct 11 05:00:02 crc kubenswrapper[4798]: I1011 05:00:02.254150 4798 generic.go:334] "Generic (PLEG): container finished" podID="2d61516a-feb6-4497-8e4c-4c91cb6618e9" containerID="2d5e3bbc4db91851838d6c3e19a820a15212baf498672037b67d05ce2575dd99" exitCode=143
Oct 11 05:00:02 crc kubenswrapper[4798]: I1011 05:00:02.254230 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53b19ca59ef214ed764c273a2d4614165d5b72620863b45d1b9408003b673a81"
Oct 11 05:00:02 crc kubenswrapper[4798]: I1011 05:00:02.257189 4798 generic.go:334] "Generic (PLEG): container finished" podID="26ec8154-ecfd-4c6a-9c54-510d6a052937" containerID="191c32a6c56bb9b17ed87764a0b1c6882863d2c2f21e74b1c4bf2026aa386c12" exitCode=0
Oct 11 05:00:02 crc kubenswrapper[4798]: I1011 05:00:02.257267 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs" event={"ID":"26ec8154-ecfd-4c6a-9c54-510d6a052937","Type":"ContainerDied","Data":"191c32a6c56bb9b17ed87764a0b1c6882863d2c2f21e74b1c4bf2026aa386c12"}
Oct 11 05:00:02 crc kubenswrapper[4798]: I1011 05:00:02.303260 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6rjbn_must-gather-wwgth_2d61516a-feb6-4497-8e4c-4c91cb6618e9/copy/0.log"
Oct 11 05:00:02 crc kubenswrapper[4798]: I1011 05:00:02.304402 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rjbn/must-gather-wwgth"
Oct 11 05:00:02 crc kubenswrapper[4798]: I1011 05:00:02.403043 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2d61516a-feb6-4497-8e4c-4c91cb6618e9-must-gather-output\") pod \"2d61516a-feb6-4497-8e4c-4c91cb6618e9\" (UID: \"2d61516a-feb6-4497-8e4c-4c91cb6618e9\") "
Oct 11 05:00:02 crc kubenswrapper[4798]: I1011 05:00:02.403180 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpxzp\" (UniqueName: \"kubernetes.io/projected/2d61516a-feb6-4497-8e4c-4c91cb6618e9-kube-api-access-fpxzp\") pod \"2d61516a-feb6-4497-8e4c-4c91cb6618e9\" (UID: \"2d61516a-feb6-4497-8e4c-4c91cb6618e9\") "
Oct 11 05:00:02 crc kubenswrapper[4798]: I1011 05:00:02.417290 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d61516a-feb6-4497-8e4c-4c91cb6618e9-kube-api-access-fpxzp" (OuterVolumeSpecName: "kube-api-access-fpxzp") pod "2d61516a-feb6-4497-8e4c-4c91cb6618e9" (UID: "2d61516a-feb6-4497-8e4c-4c91cb6618e9"). InnerVolumeSpecName "kube-api-access-fpxzp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 05:00:02 crc kubenswrapper[4798]: I1011 05:00:02.506752 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpxzp\" (UniqueName: \"kubernetes.io/projected/2d61516a-feb6-4497-8e4c-4c91cb6618e9-kube-api-access-fpxzp\") on node \"crc\" DevicePath \"\""
Oct 11 05:00:02 crc kubenswrapper[4798]: I1011 05:00:02.589017 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d61516a-feb6-4497-8e4c-4c91cb6618e9-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "2d61516a-feb6-4497-8e4c-4c91cb6618e9" (UID: "2d61516a-feb6-4497-8e4c-4c91cb6618e9"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 05:00:02 crc kubenswrapper[4798]: I1011 05:00:02.609316 4798 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2d61516a-feb6-4497-8e4c-4c91cb6618e9-must-gather-output\") on node \"crc\" DevicePath \"\""
Oct 11 05:00:03 crc kubenswrapper[4798]: I1011 05:00:03.264919 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rjbn/must-gather-wwgth"
Oct 11 05:00:03 crc kubenswrapper[4798]: I1011 05:00:03.445161 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d61516a-feb6-4497-8e4c-4c91cb6618e9" path="/var/lib/kubelet/pods/2d61516a-feb6-4497-8e4c-4c91cb6618e9/volumes"
Oct 11 05:00:03 crc kubenswrapper[4798]: I1011 05:00:03.630968 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"
Oct 11 05:00:03 crc kubenswrapper[4798]: I1011 05:00:03.731884 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26ec8154-ecfd-4c6a-9c54-510d6a052937-secret-volume\") pod \"26ec8154-ecfd-4c6a-9c54-510d6a052937\" (UID: \"26ec8154-ecfd-4c6a-9c54-510d6a052937\") "
Oct 11 05:00:03 crc kubenswrapper[4798]: I1011 05:00:03.732095 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8dpw\" (UniqueName: \"kubernetes.io/projected/26ec8154-ecfd-4c6a-9c54-510d6a052937-kube-api-access-h8dpw\") pod \"26ec8154-ecfd-4c6a-9c54-510d6a052937\" (UID: \"26ec8154-ecfd-4c6a-9c54-510d6a052937\") "
Oct 11 05:00:03 crc kubenswrapper[4798]: I1011 05:00:03.732199 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26ec8154-ecfd-4c6a-9c54-510d6a052937-config-volume\") pod \"26ec8154-ecfd-4c6a-9c54-510d6a052937\" (UID: \"26ec8154-ecfd-4c6a-9c54-510d6a052937\") "
Oct 11 05:00:03 crc kubenswrapper[4798]: I1011 05:00:03.733579 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26ec8154-ecfd-4c6a-9c54-510d6a052937-config-volume" (OuterVolumeSpecName: "config-volume") pod "26ec8154-ecfd-4c6a-9c54-510d6a052937" (UID: "26ec8154-ecfd-4c6a-9c54-510d6a052937"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 05:00:03 crc kubenswrapper[4798]: I1011 05:00:03.733823 4798 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26ec8154-ecfd-4c6a-9c54-510d6a052937-config-volume\") on node \"crc\" DevicePath \"\""
Oct 11 05:00:03 crc kubenswrapper[4798]: I1011 05:00:03.738454 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26ec8154-ecfd-4c6a-9c54-510d6a052937-kube-api-access-h8dpw" (OuterVolumeSpecName: "kube-api-access-h8dpw") pod "26ec8154-ecfd-4c6a-9c54-510d6a052937" (UID: "26ec8154-ecfd-4c6a-9c54-510d6a052937"). InnerVolumeSpecName "kube-api-access-h8dpw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 05:00:03 crc kubenswrapper[4798]: I1011 05:00:03.738651 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26ec8154-ecfd-4c6a-9c54-510d6a052937-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "26ec8154-ecfd-4c6a-9c54-510d6a052937" (UID: "26ec8154-ecfd-4c6a-9c54-510d6a052937"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 05:00:03 crc kubenswrapper[4798]: I1011 05:00:03.836137 4798 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26ec8154-ecfd-4c6a-9c54-510d6a052937-secret-volume\") on node \"crc\" DevicePath \"\""
Oct 11 05:00:03 crc kubenswrapper[4798]: I1011 05:00:03.836179 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8dpw\" (UniqueName: \"kubernetes.io/projected/26ec8154-ecfd-4c6a-9c54-510d6a052937-kube-api-access-h8dpw\") on node \"crc\" DevicePath \"\""
Oct 11 05:00:04 crc kubenswrapper[4798]: I1011 05:00:04.276951 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs" event={"ID":"26ec8154-ecfd-4c6a-9c54-510d6a052937","Type":"ContainerDied","Data":"8c388cd18d7b1c0c3b9ec1a799f01dc96317e4cdcbd1a8c1368b51943b5d639d"}
Oct 11 05:00:04 crc kubenswrapper[4798]: I1011 05:00:04.278069 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c388cd18d7b1c0c3b9ec1a799f01dc96317e4cdcbd1a8c1368b51943b5d639d"
Oct 11 05:00:04 crc kubenswrapper[4798]: I1011 05:00:04.277012 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29335980-q7lbs"
Oct 11 05:00:04 crc kubenswrapper[4798]: I1011 05:00:04.750063 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt"]
Oct 11 05:00:04 crc kubenswrapper[4798]: I1011 05:00:04.757387 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29335935-k7lwt"]
Oct 11 05:00:05 crc kubenswrapper[4798]: I1011 05:00:05.437634 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a510113-4967-4bc5-81db-405d51a75250" path="/var/lib/kubelet/pods/9a510113-4967-4bc5-81db-405d51a75250/volumes"
Oct 11 05:00:24 crc kubenswrapper[4798]: I1011 05:00:24.283298 4798 scope.go:117] "RemoveContainer" containerID="af3930905326c3b9f080dfe11c73fec17f94806f2a97ac8659fcef5d8f14f5b2"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.356761 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-5cm9g/must-gather-fvqqz"]
Oct 11 05:00:28 crc kubenswrapper[4798]: E1011 05:00:28.358317 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d61516a-feb6-4497-8e4c-4c91cb6618e9" containerName="copy"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.358342 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d61516a-feb6-4497-8e4c-4c91cb6618e9" containerName="copy"
Oct 11 05:00:28 crc kubenswrapper[4798]: E1011 05:00:28.358381 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26ec8154-ecfd-4c6a-9c54-510d6a052937" containerName="collect-profiles"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.358416 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="26ec8154-ecfd-4c6a-9c54-510d6a052937" containerName="collect-profiles"
Oct 11 05:00:28 crc kubenswrapper[4798]: E1011 05:00:28.358434 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d61516a-feb6-4497-8e4c-4c91cb6618e9" containerName="gather"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.358443 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d61516a-feb6-4497-8e4c-4c91cb6618e9" containerName="gather"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.358705 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d61516a-feb6-4497-8e4c-4c91cb6618e9" containerName="gather"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.358726 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="26ec8154-ecfd-4c6a-9c54-510d6a052937" containerName="collect-profiles"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.358738 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d61516a-feb6-4497-8e4c-4c91cb6618e9" containerName="copy"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.360373 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5cm9g/must-gather-fvqqz"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.365969 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-5cm9g"/"kube-root-ca.crt"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.371803 4798 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-5cm9g"/"openshift-service-ca.crt"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.387192 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-5cm9g/must-gather-fvqqz"]
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.447114 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7lgc\" (UniqueName: \"kubernetes.io/projected/da22053c-d880-465d-85ed-7bea8aaa0b1a-kube-api-access-r7lgc\") pod \"must-gather-fvqqz\" (UID: \"da22053c-d880-465d-85ed-7bea8aaa0b1a\") " pod="openshift-must-gather-5cm9g/must-gather-fvqqz"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.447192 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/da22053c-d880-465d-85ed-7bea8aaa0b1a-must-gather-output\") pod \"must-gather-fvqqz\" (UID: \"da22053c-d880-465d-85ed-7bea8aaa0b1a\") " pod="openshift-must-gather-5cm9g/must-gather-fvqqz"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.549296 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7lgc\" (UniqueName: \"kubernetes.io/projected/da22053c-d880-465d-85ed-7bea8aaa0b1a-kube-api-access-r7lgc\") pod \"must-gather-fvqqz\" (UID: \"da22053c-d880-465d-85ed-7bea8aaa0b1a\") " pod="openshift-must-gather-5cm9g/must-gather-fvqqz"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.549345 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/da22053c-d880-465d-85ed-7bea8aaa0b1a-must-gather-output\") pod \"must-gather-fvqqz\" (UID: \"da22053c-d880-465d-85ed-7bea8aaa0b1a\") " pod="openshift-must-gather-5cm9g/must-gather-fvqqz"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.549795 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/da22053c-d880-465d-85ed-7bea8aaa0b1a-must-gather-output\") pod \"must-gather-fvqqz\" (UID: \"da22053c-d880-465d-85ed-7bea8aaa0b1a\") " pod="openshift-must-gather-5cm9g/must-gather-fvqqz"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.572153 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7lgc\" (UniqueName: \"kubernetes.io/projected/da22053c-d880-465d-85ed-7bea8aaa0b1a-kube-api-access-r7lgc\") pod \"must-gather-fvqqz\" (UID: \"da22053c-d880-465d-85ed-7bea8aaa0b1a\") " pod="openshift-must-gather-5cm9g/must-gather-fvqqz"
Oct 11 05:00:28 crc kubenswrapper[4798]: I1011 05:00:28.693538 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5cm9g/must-gather-fvqqz"
Oct 11 05:00:29 crc kubenswrapper[4798]: I1011 05:00:29.365274 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-5cm9g/must-gather-fvqqz"]
Oct 11 05:00:29 crc kubenswrapper[4798]: I1011 05:00:29.576204 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5cm9g/must-gather-fvqqz" event={"ID":"da22053c-d880-465d-85ed-7bea8aaa0b1a","Type":"ContainerStarted","Data":"07818105374be0aa97dde82353de36ed3a09e6d422c546c64854c1cbca5473b7"}
Oct 11 05:00:30 crc kubenswrapper[4798]: I1011 05:00:30.596856 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5cm9g/must-gather-fvqqz" event={"ID":"da22053c-d880-465d-85ed-7bea8aaa0b1a","Type":"ContainerStarted","Data":"d4f804ec67817409ee0c6df0700ea9b5c09c3818487b56d1d607fe388e3d9799"}
Oct 11 05:00:30 crc kubenswrapper[4798]: I1011 05:00:30.599079 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5cm9g/must-gather-fvqqz" event={"ID":"da22053c-d880-465d-85ed-7bea8aaa0b1a","Type":"ContainerStarted","Data":"81831b3536d759b1e26ee5bc6b4cf509b6bacfb86541be1fc3834f0babc7499b"}
Oct 11 05:00:30 crc kubenswrapper[4798]: I1011 05:00:30.623409 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-5cm9g/must-gather-fvqqz" podStartSLOduration=2.623368975 podStartE2EDuration="2.623368975s" podCreationTimestamp="2025-10-11 05:00:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 05:00:30.617240211 +0000 UTC m=+3925.953529897" watchObservedRunningTime="2025-10-11 05:00:30.623368975 +0000 UTC m=+3925.959658661"
Oct 11 05:00:33 crc kubenswrapper[4798]: I1011 05:00:33.995190 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-5cm9g/crc-debug-qtq2b"]
Oct 11 05:00:33 crc kubenswrapper[4798]: I1011 05:00:33.997448 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5cm9g/crc-debug-qtq2b"
Oct 11 05:00:33 crc kubenswrapper[4798]: I1011 05:00:33.999660 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-5cm9g"/"default-dockercfg-4sl4k"
Oct 11 05:00:34 crc kubenswrapper[4798]: I1011 05:00:34.095576 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-577cj\" (UniqueName: \"kubernetes.io/projected/395f03cc-ff0e-4a5a-a5fe-5498965a5137-kube-api-access-577cj\") pod \"crc-debug-qtq2b\" (UID: \"395f03cc-ff0e-4a5a-a5fe-5498965a5137\") " pod="openshift-must-gather-5cm9g/crc-debug-qtq2b"
Oct 11 05:00:34 crc kubenswrapper[4798]: I1011 05:00:34.095828 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/395f03cc-ff0e-4a5a-a5fe-5498965a5137-host\") pod \"crc-debug-qtq2b\" (UID: \"395f03cc-ff0e-4a5a-a5fe-5498965a5137\") " pod="openshift-must-gather-5cm9g/crc-debug-qtq2b"
Oct 11 05:00:34 crc kubenswrapper[4798]: I1011 05:00:34.197682 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/395f03cc-ff0e-4a5a-a5fe-5498965a5137-host\") pod \"crc-debug-qtq2b\" (UID: \"395f03cc-ff0e-4a5a-a5fe-5498965a5137\") " pod="openshift-must-gather-5cm9g/crc-debug-qtq2b"
Oct 11 05:00:34 crc kubenswrapper[4798]: I1011 05:00:34.197814 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/395f03cc-ff0e-4a5a-a5fe-5498965a5137-host\") pod \"crc-debug-qtq2b\" (UID: \"395f03cc-ff0e-4a5a-a5fe-5498965a5137\") " pod="openshift-must-gather-5cm9g/crc-debug-qtq2b"
Oct 11 05:00:34 crc kubenswrapper[4798]: I1011 05:00:34.197849 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-577cj\" (UniqueName: \"kubernetes.io/projected/395f03cc-ff0e-4a5a-a5fe-5498965a5137-kube-api-access-577cj\") pod \"crc-debug-qtq2b\" (UID: \"395f03cc-ff0e-4a5a-a5fe-5498965a5137\") " pod="openshift-must-gather-5cm9g/crc-debug-qtq2b"
Oct 11 05:00:34 crc kubenswrapper[4798]: I1011 05:00:34.800229 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-577cj\" (UniqueName: \"kubernetes.io/projected/395f03cc-ff0e-4a5a-a5fe-5498965a5137-kube-api-access-577cj\") pod \"crc-debug-qtq2b\" (UID: \"395f03cc-ff0e-4a5a-a5fe-5498965a5137\") " pod="openshift-must-gather-5cm9g/crc-debug-qtq2b"
Oct 11 05:00:34 crc kubenswrapper[4798]: I1011 05:00:34.921591 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5cm9g/crc-debug-qtq2b"
Oct 11 05:00:35 crc kubenswrapper[4798]: I1011 05:00:35.651963 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5cm9g/crc-debug-qtq2b" event={"ID":"395f03cc-ff0e-4a5a-a5fe-5498965a5137","Type":"ContainerStarted","Data":"a04182e874e003a5301080ef043474e80caa95cc95c04f5d12c3e40d1e69fd1d"}
Oct 11 05:00:35 crc kubenswrapper[4798]: I1011 05:00:35.652632 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5cm9g/crc-debug-qtq2b" event={"ID":"395f03cc-ff0e-4a5a-a5fe-5498965a5137","Type":"ContainerStarted","Data":"09f066b2d4e6d18a2920c6172f4b5903acb9ae0d6f9d7e2292619eb852a5eaaf"}
Oct 11 05:00:35 crc kubenswrapper[4798]: I1011 05:00:35.669739 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-5cm9g/crc-debug-qtq2b" podStartSLOduration=2.669703598 podStartE2EDuration="2.669703598s" podCreationTimestamp="2025-10-11 05:00:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 05:00:35.668851838 +0000 UTC m=+3931.005141524" watchObservedRunningTime="2025-10-11 05:00:35.669703598 +0000 UTC m=+3931.005993284"
Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.158853 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29335981-c7vhp"]
Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.161054 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29335981-c7vhp"
Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.185160 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29335981-c7vhp"]
Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.245093 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7gb2\" (UniqueName: \"kubernetes.io/projected/1c359100-3221-4180-9971-65bba2660d15-kube-api-access-l7gb2\") pod \"keystone-cron-29335981-c7vhp\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " pod="openstack/keystone-cron-29335981-c7vhp"
Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.245241 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-fernet-keys\") pod \"keystone-cron-29335981-c7vhp\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " pod="openstack/keystone-cron-29335981-c7vhp"
Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.245338 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-config-data\") pod \"keystone-cron-29335981-c7vhp\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " pod="openstack/keystone-cron-29335981-c7vhp"
Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.245509 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-combined-ca-bundle\") pod \"keystone-cron-29335981-c7vhp\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " pod="openstack/keystone-cron-29335981-c7vhp"
Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.347487 4798 reconciler_common.go:218]
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-combined-ca-bundle\") pod \"keystone-cron-29335981-c7vhp\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " pod="openstack/keystone-cron-29335981-c7vhp" Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.347571 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7gb2\" (UniqueName: \"kubernetes.io/projected/1c359100-3221-4180-9971-65bba2660d15-kube-api-access-l7gb2\") pod \"keystone-cron-29335981-c7vhp\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " pod="openstack/keystone-cron-29335981-c7vhp" Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.347636 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-fernet-keys\") pod \"keystone-cron-29335981-c7vhp\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " pod="openstack/keystone-cron-29335981-c7vhp" Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.347689 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-config-data\") pod \"keystone-cron-29335981-c7vhp\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " pod="openstack/keystone-cron-29335981-c7vhp" Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.354923 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-config-data\") pod \"keystone-cron-29335981-c7vhp\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " pod="openstack/keystone-cron-29335981-c7vhp" Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.357238 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-fernet-keys\") pod \"keystone-cron-29335981-c7vhp\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " pod="openstack/keystone-cron-29335981-c7vhp" Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.358564 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-combined-ca-bundle\") pod \"keystone-cron-29335981-c7vhp\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " pod="openstack/keystone-cron-29335981-c7vhp" Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.363959 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7gb2\" (UniqueName: \"kubernetes.io/projected/1c359100-3221-4180-9971-65bba2660d15-kube-api-access-l7gb2\") pod \"keystone-cron-29335981-c7vhp\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " pod="openstack/keystone-cron-29335981-c7vhp" Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.486706 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29335981-c7vhp" Oct 11 05:01:00 crc kubenswrapper[4798]: I1011 05:01:00.947572 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29335981-c7vhp"] Oct 11 05:01:00 crc kubenswrapper[4798]: W1011 05:01:00.950902 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c359100_3221_4180_9971_65bba2660d15.slice/crio-4e93c25661f094c58573acc79e857b13e4cf006b1b3b7b5292e8eb87b2c6805a WatchSource:0}: Error finding container 4e93c25661f094c58573acc79e857b13e4cf006b1b3b7b5292e8eb87b2c6805a: Status 404 returned error can't find the container with id 4e93c25661f094c58573acc79e857b13e4cf006b1b3b7b5292e8eb87b2c6805a Oct 11 05:01:01 crc kubenswrapper[4798]: I1011 05:01:01.927162 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29335981-c7vhp" event={"ID":"1c359100-3221-4180-9971-65bba2660d15","Type":"ContainerStarted","Data":"cf1e7cd0af8e8b30adbd779538a4a71adcb5cb87db79bf7e7a097ab2e5b8a996"} Oct 11 05:01:01 crc kubenswrapper[4798]: I1011 05:01:01.927773 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29335981-c7vhp" event={"ID":"1c359100-3221-4180-9971-65bba2660d15","Type":"ContainerStarted","Data":"4e93c25661f094c58573acc79e857b13e4cf006b1b3b7b5292e8eb87b2c6805a"} Oct 11 05:01:01 crc kubenswrapper[4798]: I1011 05:01:01.950808 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29335981-c7vhp" podStartSLOduration=1.950788675 podStartE2EDuration="1.950788675s" podCreationTimestamp="2025-10-11 05:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 05:01:01.945663254 +0000 UTC m=+3957.281952940" watchObservedRunningTime="2025-10-11 05:01:01.950788675 +0000 UTC m=+3957.287078361" Oct 11 05:01:04 crc kubenswrapper[4798]: I1011 05:01:04.957979 4798 generic.go:334] "Generic (PLEG): container finished" podID="1c359100-3221-4180-9971-65bba2660d15" containerID="cf1e7cd0af8e8b30adbd779538a4a71adcb5cb87db79bf7e7a097ab2e5b8a996" exitCode=0 Oct 11 05:01:04 crc kubenswrapper[4798]: I1011 05:01:04.958058 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29335981-c7vhp" event={"ID":"1c359100-3221-4180-9971-65bba2660d15","Type":"ContainerDied","Data":"cf1e7cd0af8e8b30adbd779538a4a71adcb5cb87db79bf7e7a097ab2e5b8a996"} Oct 11 05:01:06 crc kubenswrapper[4798]: I1011 05:01:06.352021 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29335981-c7vhp" Oct 11 05:01:06 crc kubenswrapper[4798]: I1011 05:01:06.393812 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-config-data\") pod \"1c359100-3221-4180-9971-65bba2660d15\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " Oct 11 05:01:06 crc kubenswrapper[4798]: I1011 05:01:06.393960 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-fernet-keys\") pod \"1c359100-3221-4180-9971-65bba2660d15\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " Oct 11 05:01:06 crc kubenswrapper[4798]: I1011 05:01:06.394015 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-combined-ca-bundle\") pod \"1c359100-3221-4180-9971-65bba2660d15\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " Oct 11 05:01:06 crc kubenswrapper[4798]: I1011 05:01:06.394193 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7gb2\" (UniqueName: \"kubernetes.io/projected/1c359100-3221-4180-9971-65bba2660d15-kube-api-access-l7gb2\") pod \"1c359100-3221-4180-9971-65bba2660d15\" (UID: \"1c359100-3221-4180-9971-65bba2660d15\") " Oct 11 05:01:06 crc kubenswrapper[4798]: I1011 05:01:06.402018 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "1c359100-3221-4180-9971-65bba2660d15" (UID: "1c359100-3221-4180-9971-65bba2660d15"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 05:01:06 crc kubenswrapper[4798]: I1011 05:01:06.402797 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c359100-3221-4180-9971-65bba2660d15-kube-api-access-l7gb2" (OuterVolumeSpecName: "kube-api-access-l7gb2") pod "1c359100-3221-4180-9971-65bba2660d15" (UID: "1c359100-3221-4180-9971-65bba2660d15"). InnerVolumeSpecName "kube-api-access-l7gb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 05:01:06 crc kubenswrapper[4798]: I1011 05:01:06.456457 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-config-data" (OuterVolumeSpecName: "config-data") pod "1c359100-3221-4180-9971-65bba2660d15" (UID: "1c359100-3221-4180-9971-65bba2660d15"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 05:01:06 crc kubenswrapper[4798]: I1011 05:01:06.459380 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1c359100-3221-4180-9971-65bba2660d15" (UID: "1c359100-3221-4180-9971-65bba2660d15"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 05:01:06 crc kubenswrapper[4798]: I1011 05:01:06.497198 4798 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 11 05:01:06 crc kubenswrapper[4798]: I1011 05:01:06.497268 4798 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 05:01:06 crc kubenswrapper[4798]: I1011 05:01:06.497282 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7gb2\" (UniqueName: \"kubernetes.io/projected/1c359100-3221-4180-9971-65bba2660d15-kube-api-access-l7gb2\") on node \"crc\" DevicePath \"\"" Oct 11 05:01:06 crc kubenswrapper[4798]: I1011 05:01:06.497295 4798 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c359100-3221-4180-9971-65bba2660d15-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 05:01:07 crc kubenswrapper[4798]: I1011 05:01:07.002607 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29335981-c7vhp" event={"ID":"1c359100-3221-4180-9971-65bba2660d15","Type":"ContainerDied","Data":"4e93c25661f094c58573acc79e857b13e4cf006b1b3b7b5292e8eb87b2c6805a"} Oct 11 05:01:07 crc kubenswrapper[4798]: I1011 05:01:07.002815 4798 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e93c25661f094c58573acc79e857b13e4cf006b1b3b7b5292e8eb87b2c6805a" Oct 11 05:01:07 crc kubenswrapper[4798]: I1011 05:01:07.002799 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29335981-c7vhp" Oct 11 05:01:10 crc kubenswrapper[4798]: I1011 05:01:10.032887 4798 generic.go:334] "Generic (PLEG): container finished" podID="395f03cc-ff0e-4a5a-a5fe-5498965a5137" containerID="a04182e874e003a5301080ef043474e80caa95cc95c04f5d12c3e40d1e69fd1d" exitCode=0 Oct 11 05:01:10 crc kubenswrapper[4798]: I1011 05:01:10.032989 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5cm9g/crc-debug-qtq2b" event={"ID":"395f03cc-ff0e-4a5a-a5fe-5498965a5137","Type":"ContainerDied","Data":"a04182e874e003a5301080ef043474e80caa95cc95c04f5d12c3e40d1e69fd1d"} Oct 11 05:01:11 crc kubenswrapper[4798]: I1011 05:01:11.151993 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-5cm9g/crc-debug-qtq2b" Oct 11 05:01:11 crc kubenswrapper[4798]: I1011 05:01:11.216770 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-5cm9g/crc-debug-qtq2b"] Oct 11 05:01:11 crc kubenswrapper[4798]: I1011 05:01:11.231461 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-5cm9g/crc-debug-qtq2b"] Oct 11 05:01:11 crc kubenswrapper[4798]: I1011 05:01:11.340544 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-577cj\" (UniqueName: \"kubernetes.io/projected/395f03cc-ff0e-4a5a-a5fe-5498965a5137-kube-api-access-577cj\") pod \"395f03cc-ff0e-4a5a-a5fe-5498965a5137\" (UID: \"395f03cc-ff0e-4a5a-a5fe-5498965a5137\") " Oct 11 05:01:11 crc kubenswrapper[4798]: I1011 05:01:11.340695 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/395f03cc-ff0e-4a5a-a5fe-5498965a5137-host\") pod \"395f03cc-ff0e-4a5a-a5fe-5498965a5137\" (UID: \"395f03cc-ff0e-4a5a-a5fe-5498965a5137\") " Oct 11 05:01:11 crc kubenswrapper[4798]: I1011 05:01:11.340802 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/395f03cc-ff0e-4a5a-a5fe-5498965a5137-host" (OuterVolumeSpecName: "host") pod "395f03cc-ff0e-4a5a-a5fe-5498965a5137" (UID: "395f03cc-ff0e-4a5a-a5fe-5498965a5137"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 05:01:11 crc kubenswrapper[4798]: I1011 05:01:11.341337 4798 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/395f03cc-ff0e-4a5a-a5fe-5498965a5137-host\") on node \"crc\" DevicePath \"\"" Oct 11 05:01:11 crc kubenswrapper[4798]: I1011 05:01:11.355769 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/395f03cc-ff0e-4a5a-a5fe-5498965a5137-kube-api-access-577cj" (OuterVolumeSpecName: "kube-api-access-577cj") pod "395f03cc-ff0e-4a5a-a5fe-5498965a5137" (UID: "395f03cc-ff0e-4a5a-a5fe-5498965a5137"). InnerVolumeSpecName "kube-api-access-577cj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 05:01:11 crc kubenswrapper[4798]: I1011 05:01:11.435055 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="395f03cc-ff0e-4a5a-a5fe-5498965a5137" path="/var/lib/kubelet/pods/395f03cc-ff0e-4a5a-a5fe-5498965a5137/volumes" Oct 11 05:01:11 crc kubenswrapper[4798]: I1011 05:01:11.443798 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-577cj\" (UniqueName: \"kubernetes.io/projected/395f03cc-ff0e-4a5a-a5fe-5498965a5137-kube-api-access-577cj\") on node \"crc\" DevicePath \"\"" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.054448 4798 scope.go:117] "RemoveContainer" containerID="a04182e874e003a5301080ef043474e80caa95cc95c04f5d12c3e40d1e69fd1d" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.054542 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-5cm9g/crc-debug-qtq2b" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.386993 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-5cm9g/crc-debug-97jq7"] Oct 11 05:01:12 crc kubenswrapper[4798]: E1011 05:01:12.388201 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="395f03cc-ff0e-4a5a-a5fe-5498965a5137" containerName="container-00" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.388273 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="395f03cc-ff0e-4a5a-a5fe-5498965a5137" containerName="container-00" Oct 11 05:01:12 crc kubenswrapper[4798]: E1011 05:01:12.388380 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c359100-3221-4180-9971-65bba2660d15" containerName="keystone-cron" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.388469 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c359100-3221-4180-9971-65bba2660d15" containerName="keystone-cron" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.388722 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c359100-3221-4180-9971-65bba2660d15" containerName="keystone-cron" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.388793 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="395f03cc-ff0e-4a5a-a5fe-5498965a5137" containerName="container-00" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.389562 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5cm9g/crc-debug-97jq7" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.392256 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-5cm9g"/"default-dockercfg-4sl4k" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.568561 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57wcp\" (UniqueName: \"kubernetes.io/projected/6bf2054d-48d9-4b5a-a095-5e64f652a027-kube-api-access-57wcp\") pod \"crc-debug-97jq7\" (UID: \"6bf2054d-48d9-4b5a-a095-5e64f652a027\") " pod="openshift-must-gather-5cm9g/crc-debug-97jq7" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.568709 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6bf2054d-48d9-4b5a-a095-5e64f652a027-host\") pod \"crc-debug-97jq7\" (UID: \"6bf2054d-48d9-4b5a-a095-5e64f652a027\") " pod="openshift-must-gather-5cm9g/crc-debug-97jq7" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.670954 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57wcp\" (UniqueName: \"kubernetes.io/projected/6bf2054d-48d9-4b5a-a095-5e64f652a027-kube-api-access-57wcp\") pod \"crc-debug-97jq7\" (UID: \"6bf2054d-48d9-4b5a-a095-5e64f652a027\") " pod="openshift-must-gather-5cm9g/crc-debug-97jq7" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.671076 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6bf2054d-48d9-4b5a-a095-5e64f652a027-host\") pod \"crc-debug-97jq7\" (UID: \"6bf2054d-48d9-4b5a-a095-5e64f652a027\") " pod="openshift-must-gather-5cm9g/crc-debug-97jq7" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.671168 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: 
\"kubernetes.io/host-path/6bf2054d-48d9-4b5a-a095-5e64f652a027-host\") pod \"crc-debug-97jq7\" (UID: \"6bf2054d-48d9-4b5a-a095-5e64f652a027\") " pod="openshift-must-gather-5cm9g/crc-debug-97jq7" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.694326 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57wcp\" (UniqueName: \"kubernetes.io/projected/6bf2054d-48d9-4b5a-a095-5e64f652a027-kube-api-access-57wcp\") pod \"crc-debug-97jq7\" (UID: \"6bf2054d-48d9-4b5a-a095-5e64f652a027\") " pod="openshift-must-gather-5cm9g/crc-debug-97jq7" Oct 11 05:01:12 crc kubenswrapper[4798]: I1011 05:01:12.706805 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5cm9g/crc-debug-97jq7" Oct 11 05:01:13 crc kubenswrapper[4798]: I1011 05:01:13.069236 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5cm9g/crc-debug-97jq7" event={"ID":"6bf2054d-48d9-4b5a-a095-5e64f652a027","Type":"ContainerStarted","Data":"370cd36ad77d82f2a2aa3b96ee51726b9a6ace7379cae7c3ee4ffe37335fcd54"} Oct 11 05:01:13 crc kubenswrapper[4798]: I1011 05:01:13.069296 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5cm9g/crc-debug-97jq7" event={"ID":"6bf2054d-48d9-4b5a-a095-5e64f652a027","Type":"ContainerStarted","Data":"69842df938376f24b098e932f68e6740830d6d1633a3681afda7604c15b4095b"} Oct 11 05:01:13 crc kubenswrapper[4798]: I1011 05:01:13.087476 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-5cm9g/crc-debug-97jq7" podStartSLOduration=1.08745124 podStartE2EDuration="1.08745124s" podCreationTimestamp="2025-10-11 05:01:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 05:01:13.082067954 +0000 UTC m=+3968.418357650" watchObservedRunningTime="2025-10-11 05:01:13.08745124 +0000 UTC m=+3968.423740956" Oct 11 05:01:14 crc kubenswrapper[4798]: I1011 05:01:14.082429 4798 generic.go:334] "Generic (PLEG): container finished" podID="6bf2054d-48d9-4b5a-a095-5e64f652a027" containerID="370cd36ad77d82f2a2aa3b96ee51726b9a6ace7379cae7c3ee4ffe37335fcd54" exitCode=0 Oct 11 05:01:14 crc kubenswrapper[4798]: I1011 05:01:14.082588 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5cm9g/crc-debug-97jq7" event={"ID":"6bf2054d-48d9-4b5a-a095-5e64f652a027","Type":"ContainerDied","Data":"370cd36ad77d82f2a2aa3b96ee51726b9a6ace7379cae7c3ee4ffe37335fcd54"} Oct 11 05:01:15 crc kubenswrapper[4798]: I1011 05:01:15.219245 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-5cm9g/crc-debug-97jq7" Oct 11 05:01:15 crc kubenswrapper[4798]: I1011 05:01:15.224768 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-57wcp\" (UniqueName: \"kubernetes.io/projected/6bf2054d-48d9-4b5a-a095-5e64f652a027-kube-api-access-57wcp\") pod \"6bf2054d-48d9-4b5a-a095-5e64f652a027\" (UID: \"6bf2054d-48d9-4b5a-a095-5e64f652a027\") " Oct 11 05:01:15 crc kubenswrapper[4798]: I1011 05:01:15.225206 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6bf2054d-48d9-4b5a-a095-5e64f652a027-host\") pod \"6bf2054d-48d9-4b5a-a095-5e64f652a027\" (UID: \"6bf2054d-48d9-4b5a-a095-5e64f652a027\") " Oct 11 05:01:15 crc kubenswrapper[4798]: I1011 05:01:15.225354 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6bf2054d-48d9-4b5a-a095-5e64f652a027-host" (OuterVolumeSpecName: "host") pod "6bf2054d-48d9-4b5a-a095-5e64f652a027" (UID: "6bf2054d-48d9-4b5a-a095-5e64f652a027"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 05:01:15 crc kubenswrapper[4798]: I1011 05:01:15.226546 4798 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6bf2054d-48d9-4b5a-a095-5e64f652a027-host\") on node \"crc\" DevicePath \"\"" Oct 11 05:01:15 crc kubenswrapper[4798]: I1011 05:01:15.234317 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bf2054d-48d9-4b5a-a095-5e64f652a027-kube-api-access-57wcp" (OuterVolumeSpecName: "kube-api-access-57wcp") pod "6bf2054d-48d9-4b5a-a095-5e64f652a027" (UID: "6bf2054d-48d9-4b5a-a095-5e64f652a027"). InnerVolumeSpecName "kube-api-access-57wcp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 05:01:15 crc kubenswrapper[4798]: I1011 05:01:15.275457 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-5cm9g/crc-debug-97jq7"] Oct 11 05:01:15 crc kubenswrapper[4798]: I1011 05:01:15.289451 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-5cm9g/crc-debug-97jq7"] Oct 11 05:01:15 crc kubenswrapper[4798]: I1011 05:01:15.328747 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-57wcp\" (UniqueName: \"kubernetes.io/projected/6bf2054d-48d9-4b5a-a095-5e64f652a027-kube-api-access-57wcp\") on node \"crc\" DevicePath \"\"" Oct 11 05:01:15 crc kubenswrapper[4798]: I1011 05:01:15.436420 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bf2054d-48d9-4b5a-a095-5e64f652a027" path="/var/lib/kubelet/pods/6bf2054d-48d9-4b5a-a095-5e64f652a027/volumes" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 05:01:16.104974 4798 scope.go:117] "RemoveContainer" containerID="370cd36ad77d82f2a2aa3b96ee51726b9a6ace7379cae7c3ee4ffe37335fcd54" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 05:01:16.105027 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-5cm9g/crc-debug-97jq7" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 05:01:16.447569 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-5cm9g/crc-debug-xdjpq"] Oct 11 05:01:16 crc kubenswrapper[4798]: E1011 05:01:16.448038 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bf2054d-48d9-4b5a-a095-5e64f652a027" containerName="container-00" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 05:01:16.448052 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bf2054d-48d9-4b5a-a095-5e64f652a027" containerName="container-00" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 05:01:16.448260 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bf2054d-48d9-4b5a-a095-5e64f652a027" containerName="container-00" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 05:01:16.448985 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5cm9g/crc-debug-xdjpq" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 05:01:16.451973 4798 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-5cm9g"/"default-dockercfg-4sl4k" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 05:01:16.460495 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/89095172-e663-46e7-8a46-bdd347394132-host\") pod \"crc-debug-xdjpq\" (UID: \"89095172-e663-46e7-8a46-bdd347394132\") " pod="openshift-must-gather-5cm9g/crc-debug-xdjpq" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 05:01:16.460963 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hl2rs\" (UniqueName: \"kubernetes.io/projected/89095172-e663-46e7-8a46-bdd347394132-kube-api-access-hl2rs\") pod \"crc-debug-xdjpq\" (UID: \"89095172-e663-46e7-8a46-bdd347394132\") " pod="openshift-must-gather-5cm9g/crc-debug-xdjpq" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 05:01:16.563881 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/89095172-e663-46e7-8a46-bdd347394132-host\") pod \"crc-debug-xdjpq\" (UID: \"89095172-e663-46e7-8a46-bdd347394132\") " pod="openshift-must-gather-5cm9g/crc-debug-xdjpq" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 05:01:16.564427 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hl2rs\" (UniqueName: \"kubernetes.io/projected/89095172-e663-46e7-8a46-bdd347394132-kube-api-access-hl2rs\") pod \"crc-debug-xdjpq\" (UID: \"89095172-e663-46e7-8a46-bdd347394132\") " pod="openshift-must-gather-5cm9g/crc-debug-xdjpq" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 05:01:16.564448 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/89095172-e663-46e7-8a46-bdd347394132-host\") pod \"crc-debug-xdjpq\" (UID: \"89095172-e663-46e7-8a46-bdd347394132\") " pod="openshift-must-gather-5cm9g/crc-debug-xdjpq" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 05:01:16.592827 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hl2rs\" (UniqueName: \"kubernetes.io/projected/89095172-e663-46e7-8a46-bdd347394132-kube-api-access-hl2rs\") pod \"crc-debug-xdjpq\" (UID: \"89095172-e663-46e7-8a46-bdd347394132\") " pod="openshift-must-gather-5cm9g/crc-debug-xdjpq" Oct 11 05:01:16 crc kubenswrapper[4798]: I1011 
05:01:16.776655 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5cm9g/crc-debug-xdjpq" Oct 11 05:01:16 crc kubenswrapper[4798]: W1011 05:01:16.804704 4798 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod89095172_e663_46e7_8a46_bdd347394132.slice/crio-71605b511343c9d9181012a3c750e0706639edbc03e643a9552e46dab9842ac2 WatchSource:0}: Error finding container 71605b511343c9d9181012a3c750e0706639edbc03e643a9552e46dab9842ac2: Status 404 returned error can't find the container with id 71605b511343c9d9181012a3c750e0706639edbc03e643a9552e46dab9842ac2 Oct 11 05:01:17 crc kubenswrapper[4798]: I1011 05:01:17.118821 4798 generic.go:334] "Generic (PLEG): container finished" podID="89095172-e663-46e7-8a46-bdd347394132" containerID="57655497966dbe8d6682b54f644a93f79fcc8793cc81f20596e914ed1b346261" exitCode=0 Oct 11 05:01:17 crc kubenswrapper[4798]: I1011 05:01:17.118920 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5cm9g/crc-debug-xdjpq" event={"ID":"89095172-e663-46e7-8a46-bdd347394132","Type":"ContainerDied","Data":"57655497966dbe8d6682b54f644a93f79fcc8793cc81f20596e914ed1b346261"} Oct 11 05:01:17 crc kubenswrapper[4798]: I1011 05:01:17.119272 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5cm9g/crc-debug-xdjpq" event={"ID":"89095172-e663-46e7-8a46-bdd347394132","Type":"ContainerStarted","Data":"71605b511343c9d9181012a3c750e0706639edbc03e643a9552e46dab9842ac2"} Oct 11 05:01:17 crc kubenswrapper[4798]: I1011 05:01:17.171016 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-5cm9g/crc-debug-xdjpq"] Oct 11 05:01:17 crc kubenswrapper[4798]: I1011 05:01:17.180196 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-5cm9g/crc-debug-xdjpq"] Oct 11 05:01:18 crc kubenswrapper[4798]: I1011 05:01:18.234927 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5cm9g/crc-debug-xdjpq" Oct 11 05:01:18 crc kubenswrapper[4798]: I1011 05:01:18.406682 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hl2rs\" (UniqueName: \"kubernetes.io/projected/89095172-e663-46e7-8a46-bdd347394132-kube-api-access-hl2rs\") pod \"89095172-e663-46e7-8a46-bdd347394132\" (UID: \"89095172-e663-46e7-8a46-bdd347394132\") " Oct 11 05:01:18 crc kubenswrapper[4798]: I1011 05:01:18.406742 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/89095172-e663-46e7-8a46-bdd347394132-host\") pod \"89095172-e663-46e7-8a46-bdd347394132\" (UID: \"89095172-e663-46e7-8a46-bdd347394132\") " Oct 11 05:01:18 crc kubenswrapper[4798]: I1011 05:01:18.406863 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/89095172-e663-46e7-8a46-bdd347394132-host" (OuterVolumeSpecName: "host") pod "89095172-e663-46e7-8a46-bdd347394132" (UID: "89095172-e663-46e7-8a46-bdd347394132"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 05:01:18 crc kubenswrapper[4798]: I1011 05:01:18.407323 4798 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/89095172-e663-46e7-8a46-bdd347394132-host\") on node \"crc\" DevicePath \"\"" Oct 11 05:01:18 crc kubenswrapper[4798]: I1011 05:01:18.413128 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89095172-e663-46e7-8a46-bdd347394132-kube-api-access-hl2rs" (OuterVolumeSpecName: "kube-api-access-hl2rs") pod "89095172-e663-46e7-8a46-bdd347394132" (UID: "89095172-e663-46e7-8a46-bdd347394132"). InnerVolumeSpecName "kube-api-access-hl2rs". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 05:01:18 crc kubenswrapper[4798]: I1011 05:01:18.510811 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hl2rs\" (UniqueName: \"kubernetes.io/projected/89095172-e663-46e7-8a46-bdd347394132-kube-api-access-hl2rs\") on node \"crc\" DevicePath \"\"" Oct 11 05:01:19 crc kubenswrapper[4798]: I1011 05:01:19.142033 4798 scope.go:117] "RemoveContainer" containerID="57655497966dbe8d6682b54f644a93f79fcc8793cc81f20596e914ed1b346261" Oct 11 05:01:19 crc kubenswrapper[4798]: I1011 05:01:19.142094 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5cm9g/crc-debug-xdjpq" Oct 11 05:01:19 crc kubenswrapper[4798]: I1011 05:01:19.437287 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89095172-e663-46e7-8a46-bdd347394132" path="/var/lib/kubelet/pods/89095172-e663-46e7-8a46-bdd347394132/volumes" Oct 11 05:01:24 crc kubenswrapper[4798]: I1011 05:01:24.675877 4798 scope.go:117] "RemoveContainer" containerID="2719b3cbd22c3fc963f31763151aa39a556a330e954cb5a8392d51bd71980006" Oct 11 05:01:24 crc kubenswrapper[4798]: I1011 05:01:24.699762 4798 scope.go:117] "RemoveContainer" containerID="2d5e3bbc4db91851838d6c3e19a820a15212baf498672037b67d05ce2575dd99" Oct 11 05:01:24 crc kubenswrapper[4798]: I1011 05:01:24.750019 4798 scope.go:117] "RemoveContainer" containerID="6837eaa8e60b27d5c42d5c6d75533620642989586fd991e5ec749c6be19ba4a5" Oct 11 05:01:27 crc kubenswrapper[4798]: I1011 05:01:27.138471 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 05:01:27 crc kubenswrapper[4798]: I1011 05:01:27.139029 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 05:01:35 crc kubenswrapper[4798]: I1011 05:01:35.317737 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7466b4ffd-mfb97_6f108baa-f968-4e0a-8227-8cc389329a06/barbican-api/0.log" Oct 11 05:01:35 crc kubenswrapper[4798]: I1011 05:01:35.329443 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7466b4ffd-mfb97_6f108baa-f968-4e0a-8227-8cc389329a06/barbican-api-log/0.log" Oct 11 05:01:35 crc kubenswrapper[4798]: I1011 05:01:35.496046 4798 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_barbican-keystone-listener-6574767f6b-8p24k_141c65d6-e8c4-4ae3-be7b-7adf75193efc/barbican-keystone-listener/0.log" Oct 11 05:01:35 crc kubenswrapper[4798]: I1011 05:01:35.585658 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6574767f6b-8p24k_141c65d6-e8c4-4ae3-be7b-7adf75193efc/barbican-keystone-listener-log/0.log" Oct 11 05:01:35 crc kubenswrapper[4798]: I1011 05:01:35.741178 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-55d589c759-9xthl_708609f6-405f-4f80-a2ed-e749a3803884/barbican-worker/0.log" Oct 11 05:01:35 crc kubenswrapper[4798]: I1011 05:01:35.807284 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-55d589c759-9xthl_708609f6-405f-4f80-a2ed-e749a3803884/barbican-worker-log/0.log" Oct 11 05:01:35 crc kubenswrapper[4798]: I1011 05:01:35.991717 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-msblf_6c616614-d70b-48a1-864f-df99b3bb33f3/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 05:01:36 crc kubenswrapper[4798]: I1011 05:01:36.214023 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f1864cfd-61a1-4cb5-ae10-af539f50abc4/ceilometer-notification-agent/0.log" Oct 11 05:01:36 crc kubenswrapper[4798]: I1011 05:01:36.223977 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f1864cfd-61a1-4cb5-ae10-af539f50abc4/ceilometer-central-agent/0.log" Oct 11 05:01:36 crc kubenswrapper[4798]: I1011 05:01:36.319868 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f1864cfd-61a1-4cb5-ae10-af539f50abc4/proxy-httpd/0.log" Oct 11 05:01:36 crc kubenswrapper[4798]: I1011 05:01:36.402937 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f1864cfd-61a1-4cb5-ae10-af539f50abc4/sg-core/0.log" Oct 11 05:01:36 crc kubenswrapper[4798]: I1011 05:01:36.575367 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-edpm-deployment-openstack-edpm-ipam-2gxwv_31303b8b-3524-40dd-9534-baccdc1d5a70/ceph-client-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 05:01:36 crc kubenswrapper[4798]: I1011 05:01:36.741954 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xjwr9_29216d22-5411-4bbd-b2c3-f643df3218c0/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 05:01:36 crc kubenswrapper[4798]: I1011 05:01:36.950337 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_b454f77b-5618-4f7e-b603-9fec6030c732/cinder-api/0.log" Oct 11 05:01:36 crc kubenswrapper[4798]: I1011 05:01:36.990865 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_b454f77b-5618-4f7e-b603-9fec6030c732/cinder-api-log/0.log" Oct 11 05:01:37 crc kubenswrapper[4798]: I1011 05:01:37.253085 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_f9dae869-d1e5-48c4-a08c-5f9e76f8a581/probe/0.log" Oct 11 05:01:37 crc kubenswrapper[4798]: I1011 05:01:37.517127 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_f9dae869-d1e5-48c4-a08c-5f9e76f8a581/cinder-backup/0.log" Oct 11 05:01:37 crc kubenswrapper[4798]: I1011 05:01:37.758809 4798 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_cinder-scheduler-0_b88dab9f-9394-41b9-a314-ce7e36e021d8/cinder-scheduler/0.log" Oct 11 05:01:37 crc kubenswrapper[4798]: I1011 05:01:37.852389 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_b88dab9f-9394-41b9-a314-ce7e36e021d8/probe/0.log" Oct 11 05:01:38 crc kubenswrapper[4798]: I1011 05:01:38.133011 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_6d69ada7-5b46-4e22-b304-8ca7957976c5/probe/0.log" Oct 11 05:01:38 crc kubenswrapper[4798]: I1011 05:01:38.151191 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_6d69ada7-5b46-4e22-b304-8ca7957976c5/cinder-volume/0.log" Oct 11 05:01:38 crc kubenswrapper[4798]: I1011 05:01:38.353983 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-dqnkj_c1de1dc5-3b9c-43b2-bb27-3f8e20b9f8c6/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 05:01:38 crc kubenswrapper[4798]: I1011 05:01:38.506317 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-8vffc_bcf6a25c-81f9-4a1f-bf5e-9c15b703c682/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 05:01:38 crc kubenswrapper[4798]: I1011 05:01:38.644649 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-bvxl4_d5af4cb1-380b-4a81-87ff-b1eb52952273/init/0.log" Oct 11 05:01:38 crc kubenswrapper[4798]: I1011 05:01:38.802142 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-bvxl4_d5af4cb1-380b-4a81-87ff-b1eb52952273/init/0.log" Oct 11 05:01:38 crc kubenswrapper[4798]: I1011 05:01:38.885100 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-bvxl4_d5af4cb1-380b-4a81-87ff-b1eb52952273/dnsmasq-dns/0.log" Oct 11 05:01:38 crc kubenswrapper[4798]: I1011 05:01:38.894089 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_4207ecba-8fbe-4317-9292-8b7fd0d4df8c/glance-httpd/0.log" Oct 11 05:01:38 crc kubenswrapper[4798]: I1011 05:01:38.993609 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_4207ecba-8fbe-4317-9292-8b7fd0d4df8c/glance-log/0.log" Oct 11 05:01:39 crc kubenswrapper[4798]: I1011 05:01:39.095549 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_53c9a7f3-ce03-406a-8c8a-56f59838bf6b/glance-log/0.log" Oct 11 05:01:39 crc kubenswrapper[4798]: I1011 05:01:39.102255 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_53c9a7f3-ce03-406a-8c8a-56f59838bf6b/glance-httpd/0.log" Oct 11 05:01:39 crc kubenswrapper[4798]: I1011 05:01:39.457522 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7b5998ffb-nn2rn_a3fe5ce5-561f-4775-8024-6c4896079b30/horizon/0.log" Oct 11 05:01:39 crc kubenswrapper[4798]: I1011 05:01:39.471021 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7b5998ffb-nn2rn_a3fe5ce5-561f-4775-8024-6c4896079b30/horizon-log/0.log" Oct 11 05:01:39 crc kubenswrapper[4798]: I1011 05:01:39.498338 4798 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-qgt9f_5f08cfce-c132-471d-b2c8-e725c0c5b7ec/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 05:01:39 crc kubenswrapper[4798]: I1011 05:01:39.663137 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-4fcfm_c8977d13-c86f-494d-a02e-b617e5e27fdb/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 05:01:39 crc kubenswrapper[4798]: I1011 05:01:39.849985 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-bf585fdff-grnph_0d75e3b2-e53b-4823-9272-b7038c8b379c/keystone-api/0.log" Oct 11 05:01:39 crc kubenswrapper[4798]: I1011 05:01:39.867680 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29335981-c7vhp_1c359100-3221-4180-9971-65bba2660d15/keystone-cron/0.log" Oct 11 05:01:40 crc kubenswrapper[4798]: I1011 05:01:40.038583 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_a0b5cce2-9d60-4726-9660-bac9df48774f/kube-state-metrics/0.log" Oct 11 05:01:40 crc kubenswrapper[4798]: I1011 05:01:40.106191 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-649lj_8550f028-bbd9-455c-9c93-0e813f2a95ed/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 05:01:40 crc kubenswrapper[4798]: I1011 05:01:40.274113 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01/manila-api-log/0.log" Oct 11 05:01:40 crc kubenswrapper[4798]: I1011 05:01:40.407331 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_5a696dfc-1e8c-4d2b-9f0e-70bea3ed6d01/manila-api/0.log" Oct 11 05:01:40 crc kubenswrapper[4798]: I1011 05:01:40.484283 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_8a3790b4-be7a-434f-90c1-3c0b6623b2a5/probe/0.log" Oct 11 05:01:40 crc kubenswrapper[4798]: I1011 05:01:40.497153 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_8a3790b4-be7a-434f-90c1-3c0b6623b2a5/manila-scheduler/0.log" Oct 11 05:01:40 crc kubenswrapper[4798]: I1011 05:01:40.661556 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582/manila-share/0.log" Oct 11 05:01:40 crc kubenswrapper[4798]: I1011 05:01:40.708466 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_49ae1ecc-49d1-4cea-b7c6-2eb0cc0eb582/probe/0.log" Oct 11 05:01:40 crc kubenswrapper[4798]: I1011 05:01:40.984533 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5744f55857-rnj42_985c3452-9836-40f4-8b42-8082b04ffba3/neutron-api/0.log" Oct 11 05:01:41 crc kubenswrapper[4798]: I1011 05:01:41.025722 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5744f55857-rnj42_985c3452-9836-40f4-8b42-8082b04ffba3/neutron-httpd/0.log" Oct 11 05:01:41 crc kubenswrapper[4798]: I1011 05:01:41.266730 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-k6ztk_ad721bd6-8a4d-47c7-b544-4d6c480d6fd2/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 05:01:41 crc kubenswrapper[4798]: I1011 05:01:41.679875 4798 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-api-0_3498eff7-9fc6-42de-ab90-0df1ab533041/nova-api-log/0.log" Oct 11 05:01:42 crc kubenswrapper[4798]: I1011 05:01:42.139625 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_3498eff7-9fc6-42de-ab90-0df1ab533041/nova-api-api/0.log" Oct 11 05:01:42 crc kubenswrapper[4798]: I1011 05:01:42.328654 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_759110c9-4bac-404b-a088-4c0cd6c63d17/nova-cell0-conductor-conductor/0.log" Oct 11 05:01:42 crc kubenswrapper[4798]: I1011 05:01:42.534234 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_0206c382-c230-4d24-8d8e-7744bc1c1209/nova-cell1-conductor-conductor/0.log" Oct 11 05:01:42 crc kubenswrapper[4798]: I1011 05:01:42.691080 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_ba9b4b7f-46a9-4e6e-9c35-4a114df18a64/nova-cell1-novncproxy-novncproxy/0.log" Oct 11 05:01:42 crc kubenswrapper[4798]: I1011 05:01:42.841165 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-p4572_1a79dbc6-3c66-4728-8984-fecebe3eb6f6/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam/0.log" Oct 11 05:01:43 crc kubenswrapper[4798]: I1011 05:01:43.034756 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_9fa916be-a581-493a-ade0-f5683c1d31e1/nova-metadata-log/0.log" Oct 11 05:01:43 crc kubenswrapper[4798]: I1011 05:01:43.367677 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_a3433fc1-acd1-4b1e-9df2-578848000615/memcached/0.log" Oct 11 05:01:43 crc kubenswrapper[4798]: I1011 05:01:43.498998 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_88bd4a12-3afa-47ca-8954-e83a63f635d9/nova-scheduler-scheduler/0.log" Oct 11 05:01:43 crc kubenswrapper[4798]: I1011 05:01:43.607471 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_2d1fad00-1405-4149-96d6-7ef60d34c4f1/mysql-bootstrap/0.log" Oct 11 05:01:43 crc kubenswrapper[4798]: I1011 05:01:43.854621 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_2d1fad00-1405-4149-96d6-7ef60d34c4f1/mysql-bootstrap/0.log" Oct 11 05:01:43 crc kubenswrapper[4798]: I1011 05:01:43.922840 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_2d1fad00-1405-4149-96d6-7ef60d34c4f1/galera/0.log" Oct 11 05:01:44 crc kubenswrapper[4798]: I1011 05:01:44.097385 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_54e4a0f3-c43f-418e-b105-fd19db3ce615/mysql-bootstrap/0.log" Oct 11 05:01:44 crc kubenswrapper[4798]: I1011 05:01:44.266021 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_54e4a0f3-c43f-418e-b105-fd19db3ce615/galera/0.log" Oct 11 05:01:44 crc kubenswrapper[4798]: I1011 05:01:44.278882 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_9fa916be-a581-493a-ade0-f5683c1d31e1/nova-metadata-metadata/0.log" Oct 11 05:01:44 crc kubenswrapper[4798]: I1011 05:01:44.310761 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_54e4a0f3-c43f-418e-b105-fd19db3ce615/mysql-bootstrap/0.log" Oct 11 05:01:44 crc kubenswrapper[4798]: I1011 05:01:44.467725 4798 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_openstackclient_0ecd7207-e032-4433-9077-b023a4c891f3/openstackclient/0.log"
Oct 11 05:01:44 crc kubenswrapper[4798]: I1011 05:01:44.528572 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-m5tjs_18b39a28-1973-440d-8294-c9aa434cd56d/openstack-network-exporter/0.log"
Oct 11 05:01:44 crc kubenswrapper[4798]: I1011 05:01:44.675820 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tk4gr_7f9da136-ab4f-4a2e-99e4-e546aa4220f9/ovsdb-server-init/0.log"
Oct 11 05:01:44 crc kubenswrapper[4798]: I1011 05:01:44.825561 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tk4gr_7f9da136-ab4f-4a2e-99e4-e546aa4220f9/ovsdb-server/0.log"
Oct 11 05:01:44 crc kubenswrapper[4798]: I1011 05:01:44.827577 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tk4gr_7f9da136-ab4f-4a2e-99e4-e546aa4220f9/ovsdb-server-init/0.log"
Oct 11 05:01:44 crc kubenswrapper[4798]: I1011 05:01:44.829442 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tk4gr_7f9da136-ab4f-4a2e-99e4-e546aa4220f9/ovs-vswitchd/0.log"
Oct 11 05:01:45 crc kubenswrapper[4798]: I1011 05:01:45.015867 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-vmqz7_08139862-0611-43b8-93e1-c5d9cd73e4d5/ovn-edpm-deployment-openstack-edpm-ipam/0.log"
Oct 11 05:01:45 crc kubenswrapper[4798]: I1011 05:01:45.041829 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-sv678_3e260c01-682b-4b79-9ebe-c06c29750bfe/ovn-controller/0.log"
Oct 11 05:01:45 crc kubenswrapper[4798]: I1011 05:01:45.192743 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01/openstack-network-exporter/0.log"
Oct 11 05:01:45 crc kubenswrapper[4798]: I1011 05:01:45.224116 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_2c3825ca-11d1-4b60-b0c5-2f4b80a7fd01/ovn-northd/0.log"
Oct 11 05:01:45 crc kubenswrapper[4798]: I1011 05:01:45.339036 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5fcb3fcb-1839-493f-b485-0bb9ea061bc2/openstack-network-exporter/0.log"
Oct 11 05:01:45 crc kubenswrapper[4798]: I1011 05:01:45.407785 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5fcb3fcb-1839-493f-b485-0bb9ea061bc2/ovsdbserver-nb/0.log"
Oct 11 05:01:45 crc kubenswrapper[4798]: I1011 05:01:45.503930 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_76d0b2fe-e594-4a66-8358-d3aed19300ea/openstack-network-exporter/0.log"
Oct 11 05:01:45 crc kubenswrapper[4798]: I1011 05:01:45.591106 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_76d0b2fe-e594-4a66-8358-d3aed19300ea/ovsdbserver-sb/0.log"
Oct 11 05:01:45 crc kubenswrapper[4798]: I1011 05:01:45.680094 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-c67f5f784-ph78z_0843bdb2-fc67-4c61-991e-383ebdb67136/placement-api/0.log"
Oct 11 05:01:45 crc kubenswrapper[4798]: I1011 05:01:45.780053 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-c67f5f784-ph78z_0843bdb2-fc67-4c61-991e-383ebdb67136/placement-log/0.log"
Oct 11 05:01:45 crc kubenswrapper[4798]: I1011 05:01:45.916440 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_cdfee9bc-39ae-48a4-9645-7191a2ae2cd5/setup-container/0.log"
Oct 11 05:01:46 crc kubenswrapper[4798]: I1011 05:01:46.041589 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_cdfee9bc-39ae-48a4-9645-7191a2ae2cd5/rabbitmq/0.log"
Oct 11 05:01:46 crc kubenswrapper[4798]: I1011 05:01:46.041766 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_cdfee9bc-39ae-48a4-9645-7191a2ae2cd5/setup-container/0.log"
Oct 11 05:01:46 crc kubenswrapper[4798]: I1011 05:01:46.121721 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe/setup-container/0.log"
Oct 11 05:01:46 crc kubenswrapper[4798]: I1011 05:01:46.358358 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe/setup-container/0.log"
Oct 11 05:01:46 crc kubenswrapper[4798]: I1011 05:01:46.369101 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-qthb4_c7e25630-2ac6-435f-bba0-281aaee8a48d/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log"
Oct 11 05:01:46 crc kubenswrapper[4798]: I1011 05:01:46.390864 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c95f2c7f-03e4-4ded-8ff7-8d7804dae8fe/rabbitmq/0.log"
Oct 11 05:01:46 crc kubenswrapper[4798]: I1011 05:01:46.797196 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-hl6zr_471b7b25-2875-473e-bb6e-0509b527b7d3/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log"
Oct 11 05:01:46 crc kubenswrapper[4798]: I1011 05:01:46.823753 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-s9wvs_230b772a-a9d9-4318-ada4-a83901d636b5/run-os-edpm-deployment-openstack-edpm-ipam/0.log"
Oct 11 05:01:47 crc kubenswrapper[4798]: I1011 05:01:47.015711 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-rznrc_e75ed635-7172-447f-ab1b-b9af46d15be1/ssh-known-hosts-edpm-deployment/0.log"
Oct 11 05:01:47 crc kubenswrapper[4798]: I1011 05:01:47.066637 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_b3b2c8d8-3245-48a3-be3a-099046cf7258/tempest-tests-tempest-tests-runner/0.log"
Oct 11 05:01:47 crc kubenswrapper[4798]: I1011 05:01:47.239078 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_b522303e-6696-4b73-87a2-127b91d444af/test-operator-logs-container/0.log"
Oct 11 05:01:47 crc kubenswrapper[4798]: I1011 05:01:47.291691 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-bb949_57b977e1-ad34-4a35-a31e-fe6e1a0b32ee/validate-network-edpm-deployment-openstack-edpm-ipam/0.log"
Oct 11 05:01:57 crc kubenswrapper[4798]: I1011 05:01:57.139222 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 05:01:57 crc kubenswrapper[4798]: I1011 05:01:57.140036 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 05:02:10 crc kubenswrapper[4798]: I1011 05:02:10.415598 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/util/0.log"
Oct 11 05:02:10 crc kubenswrapper[4798]: I1011 05:02:10.566127 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/util/0.log"
Oct 11 05:02:10 crc kubenswrapper[4798]: I1011 05:02:10.616093 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/pull/0.log"
Oct 11 05:02:10 crc kubenswrapper[4798]: I1011 05:02:10.630006 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/pull/0.log"
Oct 11 05:02:10 crc kubenswrapper[4798]: I1011 05:02:10.787918 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/util/0.log"
Oct 11 05:02:10 crc kubenswrapper[4798]: I1011 05:02:10.788250 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/pull/0.log"
Oct 11 05:02:10 crc kubenswrapper[4798]: I1011 05:02:10.856979 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_712f67a2d8a6423b4809894d753e10e23e7b19dde1bc675906c8bdb3c5h4g77_8fb1be05-a9d3-479c-880e-9e7c73efac29/extract/0.log"
Oct 11 05:02:10 crc kubenswrapper[4798]: I1011 05:02:10.999093 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-64f84fcdbb-pvqqn_7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7/kube-rbac-proxy/0.log"
Oct 11 05:02:11 crc kubenswrapper[4798]: I1011 05:02:11.086006 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-64f84fcdbb-pvqqn_7c70459a-7eb7-45e3-b6c0-e2b3c01e3cb7/manager/0.log"
Oct 11 05:02:11 crc kubenswrapper[4798]: I1011 05:02:11.141511 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-59cdc64769-9cgvk_755c871f-8030-429f-9b9f-5b1a6c1e24e0/kube-rbac-proxy/0.log"
Oct 11 05:02:11 crc kubenswrapper[4798]: I1011 05:02:11.269672 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-59cdc64769-9cgvk_755c871f-8030-429f-9b9f-5b1a6c1e24e0/manager/0.log"
Oct 11 05:02:11 crc kubenswrapper[4798]: I1011 05:02:11.339979 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-687df44cdb-x5n77_2c18a2f7-9a42-495c-bc9c-86750a381f5c/kube-rbac-proxy/0.log"
Oct 11 05:02:11 crc kubenswrapper[4798]: I1011 05:02:11.416141 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-687df44cdb-x5n77_2c18a2f7-9a42-495c-bc9c-86750a381f5c/manager/0.log"
Oct 11 05:02:11 crc kubenswrapper[4798]: I1011 05:02:11.567205 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7f5fc6b5ff-4zk9f_4490c7b3-a500-4224-8597-abd71de4fa13/kube-rbac-proxy/0.log"
Oct 11 05:02:11 crc kubenswrapper[4798]: I1011 05:02:11.750581 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7f5fc6b5ff-4zk9f_4490c7b3-a500-4224-8597-abd71de4fa13/manager/0.log"
Oct 11 05:02:11 crc kubenswrapper[4798]: I1011 05:02:11.896353 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-6d9967f8dd-lgszb_cc1b32c4-acec-47f4-95ce-88763c33ca81/manager/0.log"
Oct 11 05:02:11 crc kubenswrapper[4798]: I1011 05:02:11.919919 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-6d9967f8dd-lgszb_cc1b32c4-acec-47f4-95ce-88763c33ca81/kube-rbac-proxy/0.log"
Oct 11 05:02:12 crc kubenswrapper[4798]: I1011 05:02:12.051627 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-6d74794d9b-vpg7r_332f7594-84b2-4761-a067-a32c31469e4f/kube-rbac-proxy/0.log"
Oct 11 05:02:12 crc kubenswrapper[4798]: I1011 05:02:12.098624 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-6d74794d9b-vpg7r_332f7594-84b2-4761-a067-a32c31469e4f/manager/0.log"
Oct 11 05:02:12 crc kubenswrapper[4798]: I1011 05:02:12.197031 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-585fc5b659-zsdqq_01834c42-7ee0-4576-9136-5812fc37e1cc/kube-rbac-proxy/0.log"
Oct 11 05:02:12 crc kubenswrapper[4798]: I1011 05:02:12.410654 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-585fc5b659-zsdqq_01834c42-7ee0-4576-9136-5812fc37e1cc/manager/0.log"
Oct 11 05:02:12 crc kubenswrapper[4798]: I1011 05:02:12.418284 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-74cb5cbc49-p96zk_a168d198-c58b-4674-8286-72fb306036b2/kube-rbac-proxy/0.log"
Oct 11 05:02:12 crc kubenswrapper[4798]: I1011 05:02:12.438164 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-74cb5cbc49-p96zk_a168d198-c58b-4674-8286-72fb306036b2/manager/0.log"
Oct 11 05:02:12 crc kubenswrapper[4798]: I1011 05:02:12.572869 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-ddb98f99b-npmgn_2fe367a2-f6f5-47ea-97a5-75a8b79778fb/kube-rbac-proxy/0.log"
Oct 11 05:02:12 crc kubenswrapper[4798]: I1011 05:02:12.702310 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-ddb98f99b-npmgn_2fe367a2-f6f5-47ea-97a5-75a8b79778fb/manager/0.log"
Oct 11 05:02:12 crc kubenswrapper[4798]: I1011 05:02:12.832871 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-59578bc799-7kfp8_2448a4a6-cf4c-45ec-89cc-25621f444988/kube-rbac-proxy/0.log"
Oct 11 05:02:12 crc kubenswrapper[4798]: I1011 05:02:12.886150 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-59578bc799-7kfp8_2448a4a6-cf4c-45ec-89cc-25621f444988/manager/0.log"
Oct 11 05:02:12 crc kubenswrapper[4798]: I1011 05:02:12.904102 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-5777b4f897-l86k6_e9fb9720-fc76-47a8-b111-63b5bc2899da/kube-rbac-proxy/0.log"
Oct 11 05:02:13 crc kubenswrapper[4798]: I1011 05:02:13.098018 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-5777b4f897-l86k6_e9fb9720-fc76-47a8-b111-63b5bc2899da/manager/0.log"
Oct 11 05:02:13 crc kubenswrapper[4798]: I1011 05:02:13.135303 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-797d478b46-rwhzb_48b2c137-9726-4e99-b0ca-a3cdb3bc6a1b/kube-rbac-proxy/0.log"
Oct 11 05:02:13 crc kubenswrapper[4798]: I1011 05:02:13.174663 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-797d478b46-rwhzb_48b2c137-9726-4e99-b0ca-a3cdb3bc6a1b/manager/0.log"
Oct 11 05:02:13 crc kubenswrapper[4798]: I1011 05:02:13.353534 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-57bb74c7bf-6rfxm_7f1693fd-2ec7-4047-a2df-ffa5d7c94e67/kube-rbac-proxy/0.log"
Oct 11 05:02:13 crc kubenswrapper[4798]: I1011 05:02:13.461379 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-57bb74c7bf-6rfxm_7f1693fd-2ec7-4047-a2df-ffa5d7c94e67/manager/0.log"
Oct 11 05:02:13 crc kubenswrapper[4798]: I1011 05:02:13.577072 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-6d7c7ddf95-l9z67_13f1e30f-4d2b-4517-86ae-8dc8334b0841/kube-rbac-proxy/0.log"
Oct 11 05:02:13 crc kubenswrapper[4798]: I1011 05:02:13.582648 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-6d7c7ddf95-l9z67_13f1e30f-4d2b-4517-86ae-8dc8334b0841/manager/0.log"
Oct 11 05:02:13 crc kubenswrapper[4798]: I1011 05:02:13.694642 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4_60303330-3699-43be-ba28-96cd788e1cf0/kube-rbac-proxy/0.log"
Oct 11 05:02:13 crc kubenswrapper[4798]: I1011 05:02:13.770204 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6cc7fb757dqqlc4_60303330-3699-43be-ba28-96cd788e1cf0/manager/0.log"
Oct 11 05:02:13 crc kubenswrapper[4798]: I1011 05:02:13.913971 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7d8c4595d6-d6fzt_a0f0b4cf-9851-4d84-81e0-7130b9777793/kube-rbac-proxy/0.log"
Oct 11 05:02:14 crc kubenswrapper[4798]: I1011 05:02:14.041377 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-69766b5fb5-qk5q4_6e988a50-121d-47e0-ad52-8a1139fc8ad6/kube-rbac-proxy/0.log"
Oct 11 05:02:14 crc kubenswrapper[4798]: I1011 05:02:14.266349 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-69766b5fb5-qk5q4_6e988a50-121d-47e0-ad52-8a1139fc8ad6/operator/0.log"
Oct 11 05:02:14 crc kubenswrapper[4798]: I1011 05:02:14.307012 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-nkfzg_18c8d96d-3c6b-480f-bea7-1bfbc2d871e3/registry-server/0.log"
Oct 11 05:02:14 crc kubenswrapper[4798]: I1011 05:02:14.418480 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-869cc7797f-t29x4_9b7c0cf6-604f-43aa-ad27-0a2f54507751/kube-rbac-proxy/0.log"
Oct 11 05:02:14 crc kubenswrapper[4798]: I1011 05:02:14.617532 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-869cc7797f-t29x4_9b7c0cf6-604f-43aa-ad27-0a2f54507751/manager/0.log"
Oct 11 05:02:14 crc kubenswrapper[4798]: I1011 05:02:14.657603 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-664664cb68-jkj5p_d7c1b935-ea3d-4d7b-96ab-5eb98fbaaea8/kube-rbac-proxy/0.log"
Oct 11 05:02:14 crc kubenswrapper[4798]: I1011 05:02:14.698562 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-664664cb68-jkj5p_d7c1b935-ea3d-4d7b-96ab-5eb98fbaaea8/manager/0.log"
Oct 11 05:02:14 crc kubenswrapper[4798]: I1011 05:02:14.937751 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-8h9p4_a368474b-c03b-43b2-96af-0bb78d9f6ee6/operator/0.log"
Oct 11 05:02:14 crc kubenswrapper[4798]: I1011 05:02:14.964508 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f4d5dfdc6-fffgl_f0952c0e-9c58-446c-90eb-61c0f8f4d64a/kube-rbac-proxy/0.log"
Oct 11 05:02:15 crc kubenswrapper[4798]: I1011 05:02:15.105573 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f4d5dfdc6-fffgl_f0952c0e-9c58-446c-90eb-61c0f8f4d64a/manager/0.log"
Oct 11 05:02:15 crc kubenswrapper[4798]: I1011 05:02:15.240216 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-578874c84d-zg2f8_0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94/kube-rbac-proxy/0.log"
Oct 11 05:02:15 crc kubenswrapper[4798]: I1011 05:02:15.274785 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7d8c4595d6-d6fzt_a0f0b4cf-9851-4d84-81e0-7130b9777793/manager/0.log"
Oct 11 05:02:15 crc kubenswrapper[4798]: I1011 05:02:15.318718 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-578874c84d-zg2f8_0aecec3e-5bdb-4da4-b0a5-b3f9c7512a94/manager/0.log"
Oct 11 05:02:15 crc kubenswrapper[4798]: I1011 05:02:15.454903 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-ffcdd6c94-4t4ft_2c332094-6f91-4065-829a-736e42bd6560/kube-rbac-proxy/0.log"
Oct 11 05:02:15 crc kubenswrapper[4798]: I1011 05:02:15.465497 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-ffcdd6c94-4t4ft_2c332094-6f91-4065-829a-736e42bd6560/manager/0.log"
Oct 11 05:02:15 crc kubenswrapper[4798]: I1011 05:02:15.524432 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-646675d848-68hdd_eb92bfae-ca3f-4e70-9fa1-440a6154cb53/kube-rbac-proxy/0.log"
Oct 11 05:02:15 crc kubenswrapper[4798]: I1011 05:02:15.635155 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-646675d848-68hdd_eb92bfae-ca3f-4e70-9fa1-440a6154cb53/manager/0.log"
Oct 11 05:02:24 crc kubenswrapper[4798]: I1011 05:02:24.925578 4798 scope.go:117] "RemoveContainer" containerID="cd6cc40464096d3d36577b64feeb77499d4710bfbc6c22cfc2336b869a9f67a7"
Oct 11 05:02:27 crc kubenswrapper[4798]: I1011 05:02:27.138996 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 05:02:27 crc kubenswrapper[4798]: I1011 05:02:27.140997 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 05:02:27 crc kubenswrapper[4798]: I1011 05:02:27.141129 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2"
Oct 11 05:02:27 crc kubenswrapper[4798]: I1011 05:02:27.142137 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1547835e5b38aaac070151fdc3778d195594e548ec1f9e8578c50470ab453c91"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 11 05:02:27 crc kubenswrapper[4798]: I1011 05:02:27.142493 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://1547835e5b38aaac070151fdc3778d195594e548ec1f9e8578c50470ab453c91" gracePeriod=600
Oct 11 05:02:27 crc kubenswrapper[4798]: I1011 05:02:27.809782 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerID="1547835e5b38aaac070151fdc3778d195594e548ec1f9e8578c50470ab453c91" exitCode=0
Oct 11 05:02:27 crc kubenswrapper[4798]: I1011 05:02:27.809985 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"1547835e5b38aaac070151fdc3778d195594e548ec1f9e8578c50470ab453c91"}
Oct 11 05:02:27 crc kubenswrapper[4798]: I1011 05:02:27.810579 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerStarted","Data":"d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385"}
Oct 11 05:02:27 crc kubenswrapper[4798]: I1011 05:02:27.810613 4798 scope.go:117] "RemoveContainer" containerID="115ccad5a85066a43a5690fe450fc2e02e8e332700c139e6cb8f105f9866a92d"
Oct 11 05:02:31 crc kubenswrapper[4798]: I1011 05:02:31.711123 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-jspml_f26ca59f-fc9c-4cbd-9c82-bf0b756f9b88/control-plane-machine-set-operator/0.log"
Oct 11 05:02:31 crc kubenswrapper[4798]: I1011 05:02:31.866100 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wqjfc_29c4d9ea-078d-4ed3-a56a-c0a29887b6a1/kube-rbac-proxy/0.log"
Oct 11 05:02:31 crc kubenswrapper[4798]: I1011 05:02:31.904279 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wqjfc_29c4d9ea-078d-4ed3-a56a-c0a29887b6a1/machine-api-operator/0.log"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.422699 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-78n8f"]
Oct 11 05:02:37 crc kubenswrapper[4798]: E1011 05:02:37.423781 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89095172-e663-46e7-8a46-bdd347394132" containerName="container-00"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.423799 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="89095172-e663-46e7-8a46-bdd347394132" containerName="container-00"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.424059 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="89095172-e663-46e7-8a46-bdd347394132" containerName="container-00"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.425969 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.442363 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-78n8f"]
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.468095 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6131f05-c4b1-4405-8b72-400d996e671d-catalog-content\") pod \"redhat-operators-78n8f\" (UID: \"f6131f05-c4b1-4405-8b72-400d996e671d\") " pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.468719 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcj87\" (UniqueName: \"kubernetes.io/projected/f6131f05-c4b1-4405-8b72-400d996e671d-kube-api-access-pcj87\") pod \"redhat-operators-78n8f\" (UID: \"f6131f05-c4b1-4405-8b72-400d996e671d\") " pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.469279 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6131f05-c4b1-4405-8b72-400d996e671d-utilities\") pod \"redhat-operators-78n8f\" (UID: \"f6131f05-c4b1-4405-8b72-400d996e671d\") " pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.571758 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6131f05-c4b1-4405-8b72-400d996e671d-utilities\") pod \"redhat-operators-78n8f\" (UID: \"f6131f05-c4b1-4405-8b72-400d996e671d\") " pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.571880 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6131f05-c4b1-4405-8b72-400d996e671d-catalog-content\") pod \"redhat-operators-78n8f\" (UID: \"f6131f05-c4b1-4405-8b72-400d996e671d\") " pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.571933 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcj87\" (UniqueName: \"kubernetes.io/projected/f6131f05-c4b1-4405-8b72-400d996e671d-kube-api-access-pcj87\") pod \"redhat-operators-78n8f\" (UID: \"f6131f05-c4b1-4405-8b72-400d996e671d\") " pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.572427 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6131f05-c4b1-4405-8b72-400d996e671d-utilities\") pod \"redhat-operators-78n8f\" (UID: \"f6131f05-c4b1-4405-8b72-400d996e671d\") " pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.572792 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6131f05-c4b1-4405-8b72-400d996e671d-catalog-content\") pod \"redhat-operators-78n8f\" (UID: \"f6131f05-c4b1-4405-8b72-400d996e671d\") " pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.590893 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcj87\" (UniqueName: \"kubernetes.io/projected/f6131f05-c4b1-4405-8b72-400d996e671d-kube-api-access-pcj87\") pod \"redhat-operators-78n8f\" (UID: \"f6131f05-c4b1-4405-8b72-400d996e671d\") " pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:37 crc kubenswrapper[4798]: I1011 05:02:37.749248 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:38 crc kubenswrapper[4798]: I1011 05:02:38.250532 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-78n8f"]
Oct 11 05:02:38 crc kubenswrapper[4798]: I1011 05:02:38.923031 4798 generic.go:334] "Generic (PLEG): container finished" podID="f6131f05-c4b1-4405-8b72-400d996e671d" containerID="df49bd7614cb70cd996524c9202bf9cc1710b0cb61e003c9fb1c2c696f017f4c" exitCode=0
Oct 11 05:02:38 crc kubenswrapper[4798]: I1011 05:02:38.923104 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78n8f" event={"ID":"f6131f05-c4b1-4405-8b72-400d996e671d","Type":"ContainerDied","Data":"df49bd7614cb70cd996524c9202bf9cc1710b0cb61e003c9fb1c2c696f017f4c"}
Oct 11 05:02:38 crc kubenswrapper[4798]: I1011 05:02:38.923670 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78n8f" event={"ID":"f6131f05-c4b1-4405-8b72-400d996e671d","Type":"ContainerStarted","Data":"c812837a603027913a79be9aa096fb36eaefd519ff52a307339a846371f8d386"}
Oct 11 05:02:38 crc kubenswrapper[4798]: I1011 05:02:38.925331 4798 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 11 05:02:39 crc kubenswrapper[4798]: I1011 05:02:39.935976 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78n8f" event={"ID":"f6131f05-c4b1-4405-8b72-400d996e671d","Type":"ContainerStarted","Data":"2c967d2df6da724068a3f96909a8465f0db3aef16125b0700352eae727ecbaa4"}
Oct 11 05:02:40 crc kubenswrapper[4798]: I1011 05:02:40.950247 4798 generic.go:334] "Generic (PLEG): container finished" podID="f6131f05-c4b1-4405-8b72-400d996e671d" containerID="2c967d2df6da724068a3f96909a8465f0db3aef16125b0700352eae727ecbaa4" exitCode=0
Oct 11 05:02:40 crc kubenswrapper[4798]: I1011 05:02:40.950370 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78n8f" event={"ID":"f6131f05-c4b1-4405-8b72-400d996e671d","Type":"ContainerDied","Data":"2c967d2df6da724068a3f96909a8465f0db3aef16125b0700352eae727ecbaa4"}
Oct 11 05:02:41 crc kubenswrapper[4798]: I1011 05:02:41.966852 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78n8f" event={"ID":"f6131f05-c4b1-4405-8b72-400d996e671d","Type":"ContainerStarted","Data":"56cea38f0b608463351e339edbf3a66a3aad70a4be5019e5e59544168253caad"}
Oct 11 05:02:41 crc kubenswrapper[4798]: I1011 05:02:41.990185 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-78n8f" podStartSLOduration=2.547670122 podStartE2EDuration="4.990166504s" podCreationTimestamp="2025-10-11 05:02:37 +0000 UTC" firstStartedPulling="2025-10-11 05:02:38.925076254 +0000 UTC m=+4054.261365940" lastFinishedPulling="2025-10-11 05:02:41.367572636 +0000 UTC m=+4056.703862322" observedRunningTime="2025-10-11 05:02:41.986292153 +0000 UTC m=+4057.322581849" watchObservedRunningTime="2025-10-11 05:02:41.990166504 +0000 UTC m=+4057.326456190"
Oct 11 05:02:44 crc kubenswrapper[4798]: I1011 05:02:44.898468 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-b7hmq_25dd9371-dab0-4e09-b2a3-da8a9dfe7135/cert-manager-controller/0.log"
Oct 11 05:02:45 crc kubenswrapper[4798]: I1011 05:02:45.324992 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-fgz9c_5e28bfca-a266-4225-8f9f-4d7e71691047/cert-manager-cainjector/0.log"
Oct 11 05:02:45 crc kubenswrapper[4798]: I1011 05:02:45.412740 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-kkd7l_0564940f-8ec6-4a8f-8f1e-cda48ac953c2/cert-manager-webhook/0.log"
Oct 11 05:02:47 crc kubenswrapper[4798]: I1011 05:02:47.750100 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:47 crc kubenswrapper[4798]: I1011 05:02:47.750723 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:47 crc kubenswrapper[4798]: I1011 05:02:47.819154 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:48 crc kubenswrapper[4798]: I1011 05:02:48.079041 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:48 crc kubenswrapper[4798]: I1011 05:02:48.138005 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-78n8f"]
Oct 11 05:02:50 crc kubenswrapper[4798]: I1011 05:02:50.041544 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-78n8f" podUID="f6131f05-c4b1-4405-8b72-400d996e671d" containerName="registry-server" containerID="cri-o://56cea38f0b608463351e339edbf3a66a3aad70a4be5019e5e59544168253caad" gracePeriod=2
Oct 11 05:02:50 crc kubenswrapper[4798]: I1011 05:02:50.551786 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:50 crc kubenswrapper[4798]: I1011 05:02:50.713585 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcj87\" (UniqueName: \"kubernetes.io/projected/f6131f05-c4b1-4405-8b72-400d996e671d-kube-api-access-pcj87\") pod \"f6131f05-c4b1-4405-8b72-400d996e671d\" (UID: \"f6131f05-c4b1-4405-8b72-400d996e671d\") "
Oct 11 05:02:50 crc kubenswrapper[4798]: I1011 05:02:50.713842 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6131f05-c4b1-4405-8b72-400d996e671d-catalog-content\") pod \"f6131f05-c4b1-4405-8b72-400d996e671d\" (UID: \"f6131f05-c4b1-4405-8b72-400d996e671d\") "
Oct 11 05:02:50 crc kubenswrapper[4798]: I1011 05:02:50.718719 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6131f05-c4b1-4405-8b72-400d996e671d-utilities\") pod \"f6131f05-c4b1-4405-8b72-400d996e671d\" (UID: \"f6131f05-c4b1-4405-8b72-400d996e671d\") "
Oct 11 05:02:50 crc kubenswrapper[4798]: I1011 05:02:50.719895 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6131f05-c4b1-4405-8b72-400d996e671d-utilities" (OuterVolumeSpecName: "utilities") pod "f6131f05-c4b1-4405-8b72-400d996e671d" (UID: "f6131f05-c4b1-4405-8b72-400d996e671d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 05:02:50 crc kubenswrapper[4798]: I1011 05:02:50.720143 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6131f05-c4b1-4405-8b72-400d996e671d-kube-api-access-pcj87" (OuterVolumeSpecName: "kube-api-access-pcj87") pod "f6131f05-c4b1-4405-8b72-400d996e671d" (UID: "f6131f05-c4b1-4405-8b72-400d996e671d"). InnerVolumeSpecName "kube-api-access-pcj87". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 05:02:50 crc kubenswrapper[4798]: I1011 05:02:50.803927 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6131f05-c4b1-4405-8b72-400d996e671d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f6131f05-c4b1-4405-8b72-400d996e671d" (UID: "f6131f05-c4b1-4405-8b72-400d996e671d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 05:02:50 crc kubenswrapper[4798]: I1011 05:02:50.822063 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6131f05-c4b1-4405-8b72-400d996e671d-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 11 05:02:50 crc kubenswrapper[4798]: I1011 05:02:50.822108 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6131f05-c4b1-4405-8b72-400d996e671d-utilities\") on node \"crc\" DevicePath \"\""
Oct 11 05:02:50 crc kubenswrapper[4798]: I1011 05:02:50.822122 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcj87\" (UniqueName: \"kubernetes.io/projected/f6131f05-c4b1-4405-8b72-400d996e671d-kube-api-access-pcj87\") on node \"crc\" DevicePath \"\""
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.067678 4798 generic.go:334] "Generic (PLEG): container finished" podID="f6131f05-c4b1-4405-8b72-400d996e671d" containerID="56cea38f0b608463351e339edbf3a66a3aad70a4be5019e5e59544168253caad" exitCode=0
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.068089 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78n8f" event={"ID":"f6131f05-c4b1-4405-8b72-400d996e671d","Type":"ContainerDied","Data":"56cea38f0b608463351e339edbf3a66a3aad70a4be5019e5e59544168253caad"}
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.068127 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78n8f" event={"ID":"f6131f05-c4b1-4405-8b72-400d996e671d","Type":"ContainerDied","Data":"c812837a603027913a79be9aa096fb36eaefd519ff52a307339a846371f8d386"}
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.068147 4798 scope.go:117] "RemoveContainer" containerID="56cea38f0b608463351e339edbf3a66a3aad70a4be5019e5e59544168253caad"
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.068386 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-78n8f"
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.093786 4798 scope.go:117] "RemoveContainer" containerID="2c967d2df6da724068a3f96909a8465f0db3aef16125b0700352eae727ecbaa4"
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.130467 4798 scope.go:117] "RemoveContainer" containerID="df49bd7614cb70cd996524c9202bf9cc1710b0cb61e003c9fb1c2c696f017f4c"
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.140480 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-78n8f"]
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.145696 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-78n8f"]
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.166051 4798 scope.go:117] "RemoveContainer" containerID="56cea38f0b608463351e339edbf3a66a3aad70a4be5019e5e59544168253caad"
Oct 11 05:02:51 crc kubenswrapper[4798]: E1011 05:02:51.166654 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56cea38f0b608463351e339edbf3a66a3aad70a4be5019e5e59544168253caad\": container with ID starting with 56cea38f0b608463351e339edbf3a66a3aad70a4be5019e5e59544168253caad not found: ID does not exist" containerID="56cea38f0b608463351e339edbf3a66a3aad70a4be5019e5e59544168253caad"
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.166689 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56cea38f0b608463351e339edbf3a66a3aad70a4be5019e5e59544168253caad"} err="failed to get container status \"56cea38f0b608463351e339edbf3a66a3aad70a4be5019e5e59544168253caad\": rpc error: code = NotFound desc = could not find container \"56cea38f0b608463351e339edbf3a66a3aad70a4be5019e5e59544168253caad\": container with ID starting with 56cea38f0b608463351e339edbf3a66a3aad70a4be5019e5e59544168253caad not found: ID does not exist"
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.166714 4798 scope.go:117] "RemoveContainer" containerID="2c967d2df6da724068a3f96909a8465f0db3aef16125b0700352eae727ecbaa4"
Oct 11 05:02:51 crc kubenswrapper[4798]: E1011 05:02:51.166906 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c967d2df6da724068a3f96909a8465f0db3aef16125b0700352eae727ecbaa4\": container with ID starting with 2c967d2df6da724068a3f96909a8465f0db3aef16125b0700352eae727ecbaa4 not found: ID does not exist" containerID="2c967d2df6da724068a3f96909a8465f0db3aef16125b0700352eae727ecbaa4"
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.166932 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c967d2df6da724068a3f96909a8465f0db3aef16125b0700352eae727ecbaa4"} err="failed to get container status \"2c967d2df6da724068a3f96909a8465f0db3aef16125b0700352eae727ecbaa4\": rpc error: code = NotFound desc = could not find container \"2c967d2df6da724068a3f96909a8465f0db3aef16125b0700352eae727ecbaa4\": container with ID starting with 2c967d2df6da724068a3f96909a8465f0db3aef16125b0700352eae727ecbaa4 not found: ID does not exist"
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.166952 4798 scope.go:117] "RemoveContainer" containerID="df49bd7614cb70cd996524c9202bf9cc1710b0cb61e003c9fb1c2c696f017f4c"
Oct 11 05:02:51 crc kubenswrapper[4798]: E1011 05:02:51.167112 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df49bd7614cb70cd996524c9202bf9cc1710b0cb61e003c9fb1c2c696f017f4c\": container with ID starting with df49bd7614cb70cd996524c9202bf9cc1710b0cb61e003c9fb1c2c696f017f4c not found: ID does not exist" containerID="df49bd7614cb70cd996524c9202bf9cc1710b0cb61e003c9fb1c2c696f017f4c"
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.167132 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df49bd7614cb70cd996524c9202bf9cc1710b0cb61e003c9fb1c2c696f017f4c"} err="failed to get container status \"df49bd7614cb70cd996524c9202bf9cc1710b0cb61e003c9fb1c2c696f017f4c\": rpc error: code = NotFound desc = could not find container \"df49bd7614cb70cd996524c9202bf9cc1710b0cb61e003c9fb1c2c696f017f4c\": container with ID starting with df49bd7614cb70cd996524c9202bf9cc1710b0cb61e003c9fb1c2c696f017f4c not found: ID does not exist"
Oct 11 05:02:51 crc kubenswrapper[4798]: I1011 05:02:51.437582 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6131f05-c4b1-4405-8b72-400d996e671d" path="/var/lib/kubelet/pods/f6131f05-c4b1-4405-8b72-400d996e671d/volumes"
Oct 11 05:02:59 crc kubenswrapper[4798]: I1011 05:02:59.214078 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6b874cbd85-dlwsb_4db43a23-4466-4600-8b86-a39c6bd23319/nmstate-console-plugin/0.log"
Oct 11 05:02:59 crc kubenswrapper[4798]: I1011 05:02:59.730966 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-gflt9_ac8be373-2f59-42fb-afb2-b4449dee5657/nmstate-handler/0.log"
Oct 11 05:02:59 crc kubenswrapper[4798]: I1011 05:02:59.818262 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-fdff9cb8d-9j84j_4c1789de-d93e-434d-bbd3-d1603457248b/kube-rbac-proxy/0.log"
Oct 11 05:02:59 crc kubenswrapper[4798]: I1011 05:02:59.857840 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-fdff9cb8d-9j84j_4c1789de-d93e-434d-bbd3-d1603457248b/nmstate-metrics/0.log"
Oct 11 05:03:00 crc kubenswrapper[4798]: I1011 05:03:00.054186 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-858ddd8f98-hgtkp_211ee79e-e933-462d-9bf0-a287315deab3/nmstate-operator/0.log"
Oct 11 05:03:00 crc kubenswrapper[4798]: I1011 05:03:00.129044 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6cdbc54649-ptpf8_93ae23c3-aeb5-4d02-b48d-f1741886e18c/nmstate-webhook/0.log"
Oct 11 05:03:15 crc kubenswrapper[4798]: I1011 05:03:15.059123 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-68d546b9d8-qt94z_5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e/kube-rbac-proxy/0.log"
Oct 11 05:03:15 crc kubenswrapper[4798]: I1011 05:03:15.157282 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-68d546b9d8-qt94z_5d4e020e-a94b-4d1e-a0b2-3572df8c2c5e/controller/0.log"
Oct 11 05:03:15 crc kubenswrapper[4798]: I1011 05:03:15.283682 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-frr-files/0.log"
Oct 11 05:03:15 crc kubenswrapper[4798]: I1011 05:03:15.526889 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-metrics/0.log"
Oct 11 05:03:15 crc kubenswrapper[4798]: I1011 05:03:15.531409 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-reloader/0.log"
Oct 11 05:03:15 crc kubenswrapper[4798]: I1011 05:03:15.531553 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-frr-files/0.log"
Oct 11 05:03:15 crc kubenswrapper[4798]: I1011 05:03:15.575911 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-reloader/0.log"
Oct 11 05:03:15 crc kubenswrapper[4798]: I1011 05:03:15.708764 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-frr-files/0.log"
Oct 11 05:03:15 crc kubenswrapper[4798]: I1011 05:03:15.756744 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-metrics/0.log"
Oct 11 05:03:15 crc kubenswrapper[4798]: I1011 05:03:15.779573 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-reloader/0.log"
Oct 11 05:03:15 crc kubenswrapper[4798]: I1011 05:03:15.784706 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-metrics/0.log"
Oct 11 05:03:15 crc kubenswrapper[4798]: I1011 05:03:15.976335 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-reloader/0.log"
Oct 11 05:03:16 crc kubenswrapper[4798]: I1011 05:03:16.008325 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-frr-files/0.log"
Oct 11 05:03:16 crc kubenswrapper[4798]: I1011 05:03:16.036535 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/cp-metrics/0.log"
Oct 11 05:03:16 crc kubenswrapper[4798]: I1011 05:03:16.070787 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/controller/0.log"
Oct 11 05:03:16 crc kubenswrapper[4798]: I1011 05:03:16.248203 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/frr-metrics/0.log"
Oct 11 05:03:16 crc kubenswrapper[4798]: I1011 05:03:16.283671 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/kube-rbac-proxy-frr/0.log"
Oct 11 05:03:16 crc kubenswrapper[4798]: I1011 05:03:16.296925 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/kube-rbac-proxy/0.log"
Oct 11 05:03:16 crc kubenswrapper[4798]: I1011 05:03:16.506302 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/reloader/0.log"
Oct 11 05:03:16 crc kubenswrapper[4798]: I1011 05:03:16.564511 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-64bf5d555-2mrbj_71cb0bdd-c787-4000-b0fd-f7a0e84fa145/frr-k8s-webhook-server/0.log"
Oct 11 05:03:17 crc kubenswrapper[4798]: I1011 05:03:17.033415 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7458f849d5-nbss8_d1544005-518c-4296-b846-c5b9ac3af4c0/manager/0.log"
Oct 11 05:03:17 crc kubenswrapper[4798]: I1011 05:03:17.254096 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5786d94797-8n6wg_dbb4b409-1678-4ae6-b584-9442c133945f/webhook-server/0.log"
Oct 11 05:03:17 crc kubenswrapper[4798]: I1011 05:03:17.344876 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zdkcf_8a76e293-0882-4754-b825-688511b6c234/kube-rbac-proxy/0.log"
Oct 11 05:03:17 crc kubenswrapper[4798]: I1011 05:03:17.720605 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7q5rr_41a4b406-9a78-4d67-828b-81a3ce16248f/frr/0.log"
Oct 11 05:03:17 crc kubenswrapper[4798]: I1011 05:03:17.881498 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zdkcf_8a76e293-0882-4754-b825-688511b6c234/speaker/0.log"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.649266 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kdlxb"]
Oct 11 05:03:19 crc kubenswrapper[4798]: E1011 05:03:19.650506 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6131f05-c4b1-4405-8b72-400d996e671d" containerName="extract-utilities"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.650528 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6131f05-c4b1-4405-8b72-400d996e671d" containerName="extract-utilities"
Oct 11 05:03:19 crc kubenswrapper[4798]: E1011 05:03:19.650568 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6131f05-c4b1-4405-8b72-400d996e671d" containerName="registry-server"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.650579 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6131f05-c4b1-4405-8b72-400d996e671d" containerName="registry-server"
Oct 11 05:03:19 crc kubenswrapper[4798]: E1011 05:03:19.650637 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6131f05-c4b1-4405-8b72-400d996e671d" containerName="extract-content"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.650646 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6131f05-c4b1-4405-8b72-400d996e671d" containerName="extract-content"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.650946 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6131f05-c4b1-4405-8b72-400d996e671d" containerName="registry-server"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.653224 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.666295 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kdlxb"]
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.825685 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9245e30f-492c-43c7-b26d-f79f155f85ee-catalog-content\") pod \"redhat-marketplace-kdlxb\" (UID: \"9245e30f-492c-43c7-b26d-f79f155f85ee\") " pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.825751 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5n4x\" (UniqueName: \"kubernetes.io/projected/9245e30f-492c-43c7-b26d-f79f155f85ee-kube-api-access-x5n4x\") pod \"redhat-marketplace-kdlxb\" (UID: \"9245e30f-492c-43c7-b26d-f79f155f85ee\") " pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.826216 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9245e30f-492c-43c7-b26d-f79f155f85ee-utilities\") pod \"redhat-marketplace-kdlxb\" (UID: \"9245e30f-492c-43c7-b26d-f79f155f85ee\") " pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.928634 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9245e30f-492c-43c7-b26d-f79f155f85ee-catalog-content\") pod \"redhat-marketplace-kdlxb\" (UID: \"9245e30f-492c-43c7-b26d-f79f155f85ee\") " pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.928728 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5n4x\" (UniqueName: \"kubernetes.io/projected/9245e30f-492c-43c7-b26d-f79f155f85ee-kube-api-access-x5n4x\") pod \"redhat-marketplace-kdlxb\" (UID: \"9245e30f-492c-43c7-b26d-f79f155f85ee\") " pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.928847 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9245e30f-492c-43c7-b26d-f79f155f85ee-utilities\") pod \"redhat-marketplace-kdlxb\" (UID: \"9245e30f-492c-43c7-b26d-f79f155f85ee\") " pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.929440 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9245e30f-492c-43c7-b26d-f79f155f85ee-catalog-content\") pod \"redhat-marketplace-kdlxb\" (UID: \"9245e30f-492c-43c7-b26d-f79f155f85ee\") " pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.929525 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9245e30f-492c-43c7-b26d-f79f155f85ee-utilities\") pod \"redhat-marketplace-kdlxb\" (UID: \"9245e30f-492c-43c7-b26d-f79f155f85ee\") " pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.962693 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5n4x\" (UniqueName: \"kubernetes.io/projected/9245e30f-492c-43c7-b26d-f79f155f85ee-kube-api-access-x5n4x\") pod \"redhat-marketplace-kdlxb\" (UID: \"9245e30f-492c-43c7-b26d-f79f155f85ee\") " pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:19 crc kubenswrapper[4798]: I1011 05:03:19.979245 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:20 crc kubenswrapper[4798]: I1011 05:03:20.475413 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kdlxb"]
Oct 11 05:03:21 crc kubenswrapper[4798]: I1011 05:03:21.384908 4798 generic.go:334] "Generic (PLEG): container finished" podID="9245e30f-492c-43c7-b26d-f79f155f85ee" containerID="f5fb04783ffea0d7530f5e5a4d97bbd36dfa5b194576123f612bd084faeb1bcd" exitCode=0
Oct 11 05:03:21 crc kubenswrapper[4798]: I1011 05:03:21.385526 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kdlxb" event={"ID":"9245e30f-492c-43c7-b26d-f79f155f85ee","Type":"ContainerDied","Data":"f5fb04783ffea0d7530f5e5a4d97bbd36dfa5b194576123f612bd084faeb1bcd"}
Oct 11 05:03:21 crc kubenswrapper[4798]: I1011 05:03:21.385565 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kdlxb" event={"ID":"9245e30f-492c-43c7-b26d-f79f155f85ee","Type":"ContainerStarted","Data":"721a7b27e4a9928c51f917c30d6bbf048c4ec8d7cd78b8e113b59eba2563d77e"}
Oct 11 05:03:22 crc kubenswrapper[4798]: I1011 05:03:22.396156 4798 generic.go:334] "Generic (PLEG): container finished" podID="9245e30f-492c-43c7-b26d-f79f155f85ee" containerID="42c5f8936bbf0d0d790d280aab7e02d72ced0f5a0a49eb607439709aaf3f6588" exitCode=0
Oct 11 05:03:22 crc kubenswrapper[4798]: I1011 05:03:22.396297 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kdlxb" event={"ID":"9245e30f-492c-43c7-b26d-f79f155f85ee","Type":"ContainerDied","Data":"42c5f8936bbf0d0d790d280aab7e02d72ced0f5a0a49eb607439709aaf3f6588"}
Oct 11 05:03:24 crc kubenswrapper[4798]: I1011 05:03:24.420155 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kdlxb" event={"ID":"9245e30f-492c-43c7-b26d-f79f155f85ee","Type":"ContainerStarted","Data":"24a3c53d829ce2d23dd61c96c9eb564dfd0260e5b2d99df77739f4f405194bb7"}
Oct 11 05:03:29 crc kubenswrapper[4798]: I1011 05:03:29.980162 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:29 crc kubenswrapper[4798]: I1011 05:03:29.981189 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:30 crc kubenswrapper[4798]: I1011 05:03:30.060350 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:30 crc kubenswrapper[4798]: I1011 05:03:30.090100 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kdlxb" podStartSLOduration=9.446274055 podStartE2EDuration="11.090074159s" podCreationTimestamp="2025-10-11 05:03:19 +0000 UTC" firstStartedPulling="2025-10-11 05:03:21.38882293 +0000 UTC m=+4096.725112606" lastFinishedPulling="2025-10-11 05:03:23.032623024 +0000 UTC m=+4098.368912710" observedRunningTime="2025-10-11 05:03:24.456752008 +0000 UTC m=+4099.793041694" watchObservedRunningTime="2025-10-11 05:03:30.090074159 +0000 UTC m=+4105.426363885"
Oct 11 05:03:30 crc kubenswrapper[4798]: I1011 05:03:30.586966 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:30 crc kubenswrapper[4798]: I1011 05:03:30.648059 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kdlxb"]
Oct 11 05:03:32 crc kubenswrapper[4798]: I1011 05:03:32.523536 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kdlxb" podUID="9245e30f-492c-43c7-b26d-f79f155f85ee" containerName="registry-server" containerID="cri-o://24a3c53d829ce2d23dd61c96c9eb564dfd0260e5b2d99df77739f4f405194bb7" gracePeriod=2
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.034729 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.195612 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5n4x\" (UniqueName: \"kubernetes.io/projected/9245e30f-492c-43c7-b26d-f79f155f85ee-kube-api-access-x5n4x\") pod \"9245e30f-492c-43c7-b26d-f79f155f85ee\" (UID: \"9245e30f-492c-43c7-b26d-f79f155f85ee\") "
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.195742 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9245e30f-492c-43c7-b26d-f79f155f85ee-utilities\") pod \"9245e30f-492c-43c7-b26d-f79f155f85ee\" (UID: \"9245e30f-492c-43c7-b26d-f79f155f85ee\") "
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.196009 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9245e30f-492c-43c7-b26d-f79f155f85ee-catalog-content\") pod \"9245e30f-492c-43c7-b26d-f79f155f85ee\" (UID: \"9245e30f-492c-43c7-b26d-f79f155f85ee\") "
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.197082 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9245e30f-492c-43c7-b26d-f79f155f85ee-utilities" (OuterVolumeSpecName: "utilities") pod "9245e30f-492c-43c7-b26d-f79f155f85ee" (UID: "9245e30f-492c-43c7-b26d-f79f155f85ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.203622 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9245e30f-492c-43c7-b26d-f79f155f85ee-kube-api-access-x5n4x" (OuterVolumeSpecName: "kube-api-access-x5n4x") pod "9245e30f-492c-43c7-b26d-f79f155f85ee" (UID: "9245e30f-492c-43c7-b26d-f79f155f85ee"). InnerVolumeSpecName "kube-api-access-x5n4x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.210491 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9245e30f-492c-43c7-b26d-f79f155f85ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9245e30f-492c-43c7-b26d-f79f155f85ee" (UID: "9245e30f-492c-43c7-b26d-f79f155f85ee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.298696 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9245e30f-492c-43c7-b26d-f79f155f85ee-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.298731 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5n4x\" (UniqueName: \"kubernetes.io/projected/9245e30f-492c-43c7-b26d-f79f155f85ee-kube-api-access-x5n4x\") on node \"crc\" DevicePath \"\""
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.298744 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9245e30f-492c-43c7-b26d-f79f155f85ee-utilities\") on node \"crc\" DevicePath \"\""
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.533209 4798 generic.go:334] "Generic (PLEG): container finished" podID="9245e30f-492c-43c7-b26d-f79f155f85ee" containerID="24a3c53d829ce2d23dd61c96c9eb564dfd0260e5b2d99df77739f4f405194bb7" exitCode=0
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.533314 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kdlxb"
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.533292 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kdlxb" event={"ID":"9245e30f-492c-43c7-b26d-f79f155f85ee","Type":"ContainerDied","Data":"24a3c53d829ce2d23dd61c96c9eb564dfd0260e5b2d99df77739f4f405194bb7"}
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.533764 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kdlxb" event={"ID":"9245e30f-492c-43c7-b26d-f79f155f85ee","Type":"ContainerDied","Data":"721a7b27e4a9928c51f917c30d6bbf048c4ec8d7cd78b8e113b59eba2563d77e"}
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.533801 4798 scope.go:117] "RemoveContainer" containerID="24a3c53d829ce2d23dd61c96c9eb564dfd0260e5b2d99df77739f4f405194bb7"
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.568430 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kdlxb"]
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.569771 4798 scope.go:117] "RemoveContainer" containerID="42c5f8936bbf0d0d790d280aab7e02d72ced0f5a0a49eb607439709aaf3f6588"
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.587813 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/util/0.log"
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.592179 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kdlxb"]
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.593829 4798 scope.go:117] "RemoveContainer" containerID="f5fb04783ffea0d7530f5e5a4d97bbd36dfa5b194576123f612bd084faeb1bcd"
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.636934 4798 scope.go:117] "RemoveContainer" containerID="24a3c53d829ce2d23dd61c96c9eb564dfd0260e5b2d99df77739f4f405194bb7"
Oct 11 05:03:33 crc kubenswrapper[4798]: E1011 05:03:33.637570 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24a3c53d829ce2d23dd61c96c9eb564dfd0260e5b2d99df77739f4f405194bb7\": container with ID starting with 24a3c53d829ce2d23dd61c96c9eb564dfd0260e5b2d99df77739f4f405194bb7 not found: ID does not exist" containerID="24a3c53d829ce2d23dd61c96c9eb564dfd0260e5b2d99df77739f4f405194bb7"
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.637605 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24a3c53d829ce2d23dd61c96c9eb564dfd0260e5b2d99df77739f4f405194bb7"} err="failed to get container status \"24a3c53d829ce2d23dd61c96c9eb564dfd0260e5b2d99df77739f4f405194bb7\": rpc error: code = NotFound desc = could not find container \"24a3c53d829ce2d23dd61c96c9eb564dfd0260e5b2d99df77739f4f405194bb7\": container with ID starting with 24a3c53d829ce2d23dd61c96c9eb564dfd0260e5b2d99df77739f4f405194bb7 not found: ID does not exist"
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.637628 4798 scope.go:117] "RemoveContainer" containerID="42c5f8936bbf0d0d790d280aab7e02d72ced0f5a0a49eb607439709aaf3f6588"
Oct 11 05:03:33 crc kubenswrapper[4798]: E1011 05:03:33.638289 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42c5f8936bbf0d0d790d280aab7e02d72ced0f5a0a49eb607439709aaf3f6588\": container with ID starting with 42c5f8936bbf0d0d790d280aab7e02d72ced0f5a0a49eb607439709aaf3f6588 not found: ID does not exist" containerID="42c5f8936bbf0d0d790d280aab7e02d72ced0f5a0a49eb607439709aaf3f6588"
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.638384 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42c5f8936bbf0d0d790d280aab7e02d72ced0f5a0a49eb607439709aaf3f6588"} err="failed to get container status \"42c5f8936bbf0d0d790d280aab7e02d72ced0f5a0a49eb607439709aaf3f6588\": rpc error: code = NotFound desc = could not find container \"42c5f8936bbf0d0d790d280aab7e02d72ced0f5a0a49eb607439709aaf3f6588\": container with ID starting with 42c5f8936bbf0d0d790d280aab7e02d72ced0f5a0a49eb607439709aaf3f6588 not found: ID does not exist"
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.638436 4798 scope.go:117] "RemoveContainer" containerID="f5fb04783ffea0d7530f5e5a4d97bbd36dfa5b194576123f612bd084faeb1bcd"
Oct 11 05:03:33 crc kubenswrapper[4798]: E1011 05:03:33.638901 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5fb04783ffea0d7530f5e5a4d97bbd36dfa5b194576123f612bd084faeb1bcd\": container with ID starting with f5fb04783ffea0d7530f5e5a4d97bbd36dfa5b194576123f612bd084faeb1bcd not found: ID does not exist" containerID="f5fb04783ffea0d7530f5e5a4d97bbd36dfa5b194576123f612bd084faeb1bcd"
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.638933 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5fb04783ffea0d7530f5e5a4d97bbd36dfa5b194576123f612bd084faeb1bcd"} err="failed to get container status \"f5fb04783ffea0d7530f5e5a4d97bbd36dfa5b194576123f612bd084faeb1bcd\": rpc error: code = NotFound desc = could not find container \"f5fb04783ffea0d7530f5e5a4d97bbd36dfa5b194576123f612bd084faeb1bcd\": container with ID starting with f5fb04783ffea0d7530f5e5a4d97bbd36dfa5b194576123f612bd084faeb1bcd not found: ID does not exist"
Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.765656 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/pull/0.log"
Oct 11 05:03:33 crc 
kubenswrapper[4798]: I1011 05:03:33.776575 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/util/0.log" Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.831767 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/pull/0.log" Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.978633 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/extract/0.log" Oct 11 05:03:33 crc kubenswrapper[4798]: I1011 05:03:33.979769 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/util/0.log" Oct 11 05:03:34 crc kubenswrapper[4798]: I1011 05:03:34.012557 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2vws4k_c25b8b69-843d-4c9e-80f8-0e41b7f8aec3/pull/0.log" Oct 11 05:03:34 crc kubenswrapper[4798]: I1011 05:03:34.187550 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/extract-utilities/0.log" Oct 11 05:03:34 crc kubenswrapper[4798]: I1011 05:03:34.341622 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/extract-utilities/0.log" Oct 11 05:03:34 crc kubenswrapper[4798]: I1011 05:03:34.373668 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/extract-content/0.log" Oct 11 05:03:34 crc kubenswrapper[4798]: I1011 05:03:34.392002 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/extract-content/0.log" Oct 11 05:03:34 crc kubenswrapper[4798]: I1011 05:03:34.566836 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/extract-utilities/0.log" Oct 11 05:03:34 crc kubenswrapper[4798]: I1011 05:03:34.615807 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/extract-content/0.log" Oct 11 05:03:35 crc kubenswrapper[4798]: I1011 05:03:35.021895 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-pbct9_9c0c7752-53e3-489c-b668-370e654f482a/registry-server/0.log" Oct 11 05:03:35 crc kubenswrapper[4798]: I1011 05:03:35.093252 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/extract-utilities/0.log" Oct 11 05:03:35 crc kubenswrapper[4798]: I1011 05:03:35.203461 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/extract-utilities/0.log" Oct 11 05:03:35 crc kubenswrapper[4798]: I1011 05:03:35.208681 4798 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/extract-content/0.log" Oct 11 05:03:35 crc kubenswrapper[4798]: I1011 05:03:35.244142 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/extract-content/0.log" Oct 11 05:03:35 crc kubenswrapper[4798]: I1011 05:03:35.389071 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/extract-content/0.log" Oct 11 05:03:35 crc kubenswrapper[4798]: I1011 05:03:35.395354 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/extract-utilities/0.log" Oct 11 05:03:35 crc kubenswrapper[4798]: I1011 05:03:35.435445 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9245e30f-492c-43c7-b26d-f79f155f85ee" path="/var/lib/kubelet/pods/9245e30f-492c-43c7-b26d-f79f155f85ee/volumes" Oct 11 05:03:36 crc kubenswrapper[4798]: I1011 05:03:36.054989 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-drk2l_dfc6a2a4-b981-4d28-be73-8d6689b028cd/registry-server/0.log" Oct 11 05:03:36 crc kubenswrapper[4798]: I1011 05:03:36.188368 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/util/0.log" Oct 11 05:03:36 crc kubenswrapper[4798]: I1011 05:03:36.370135 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/util/0.log" Oct 11 05:03:36 crc kubenswrapper[4798]: I1011 05:03:36.387426 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/pull/0.log" Oct 11 05:03:36 crc kubenswrapper[4798]: I1011 05:03:36.402364 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/pull/0.log" Oct 11 05:03:36 crc kubenswrapper[4798]: I1011 05:03:36.554147 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/util/0.log" Oct 11 05:03:36 crc kubenswrapper[4798]: I1011 05:03:36.585123 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/pull/0.log" Oct 11 05:03:36 crc kubenswrapper[4798]: I1011 05:03:36.599295 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c55rlp_177b2fa2-7b2c-4483-a0c5-2c3cf6ceeed2/extract/0.log" Oct 11 05:03:36 crc kubenswrapper[4798]: I1011 05:03:36.807597 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-rkg2t_0d156e8f-2a95-406a-beb0-c9b7e36f9e8b/marketplace-operator/0.log" Oct 11 05:03:37 crc kubenswrapper[4798]: I1011 05:03:37.225105 4798 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/extract-utilities/0.log" Oct 11 05:03:37 crc kubenswrapper[4798]: I1011 05:03:37.430492 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/extract-utilities/0.log" Oct 11 05:03:37 crc kubenswrapper[4798]: I1011 05:03:37.446430 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/extract-content/0.log" Oct 11 05:03:37 crc kubenswrapper[4798]: I1011 05:03:37.470826 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/extract-content/0.log" Oct 11 05:03:37 crc kubenswrapper[4798]: I1011 05:03:37.689420 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/extract-content/0.log" Oct 11 05:03:37 crc kubenswrapper[4798]: I1011 05:03:37.689443 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/extract-utilities/0.log" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.265474 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/extract-utilities/0.log" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.415712 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9fctq"] Oct 11 05:03:38 crc kubenswrapper[4798]: E1011 05:03:38.416357 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9245e30f-492c-43c7-b26d-f79f155f85ee" containerName="registry-server" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.416383 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9245e30f-492c-43c7-b26d-f79f155f85ee" containerName="registry-server" Oct 11 05:03:38 crc kubenswrapper[4798]: E1011 05:03:38.416436 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9245e30f-492c-43c7-b26d-f79f155f85ee" containerName="extract-content" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.416447 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9245e30f-492c-43c7-b26d-f79f155f85ee" containerName="extract-content" Oct 11 05:03:38 crc kubenswrapper[4798]: E1011 05:03:38.416467 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9245e30f-492c-43c7-b26d-f79f155f85ee" containerName="extract-utilities" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.416476 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9245e30f-492c-43c7-b26d-f79f155f85ee" containerName="extract-utilities" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.416743 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="9245e30f-492c-43c7-b26d-f79f155f85ee" containerName="registry-server" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.424794 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-v8lpr_462b253d-c42e-4640-acdc-defc5c34032f/registry-server/0.log" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.435988 4798 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.469502 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9fctq"] Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.511524 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2slfz\" (UniqueName: \"kubernetes.io/projected/9988da93-f29c-4541-bf93-677fdfb61a35-kube-api-access-2slfz\") pod \"community-operators-9fctq\" (UID: \"9988da93-f29c-4541-bf93-677fdfb61a35\") " pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.512564 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9988da93-f29c-4541-bf93-677fdfb61a35-utilities\") pod \"community-operators-9fctq\" (UID: \"9988da93-f29c-4541-bf93-677fdfb61a35\") " pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.512754 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9988da93-f29c-4541-bf93-677fdfb61a35-catalog-content\") pod \"community-operators-9fctq\" (UID: \"9988da93-f29c-4541-bf93-677fdfb61a35\") " pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.575608 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/extract-content/0.log" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.612098 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/extract-utilities/0.log" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.614800 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9988da93-f29c-4541-bf93-677fdfb61a35-catalog-content\") pod \"community-operators-9fctq\" (UID: \"9988da93-f29c-4541-bf93-677fdfb61a35\") " pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.614895 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2slfz\" (UniqueName: \"kubernetes.io/projected/9988da93-f29c-4541-bf93-677fdfb61a35-kube-api-access-2slfz\") pod \"community-operators-9fctq\" (UID: \"9988da93-f29c-4541-bf93-677fdfb61a35\") " pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.614982 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9988da93-f29c-4541-bf93-677fdfb61a35-utilities\") pod \"community-operators-9fctq\" (UID: \"9988da93-f29c-4541-bf93-677fdfb61a35\") " pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.615322 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9988da93-f29c-4541-bf93-677fdfb61a35-catalog-content\") pod \"community-operators-9fctq\" (UID: \"9988da93-f29c-4541-bf93-677fdfb61a35\") " pod="openshift-marketplace/community-operators-9fctq" Oct 11 
05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.615790 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9988da93-f29c-4541-bf93-677fdfb61a35-utilities\") pod \"community-operators-9fctq\" (UID: \"9988da93-f29c-4541-bf93-677fdfb61a35\") " pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.634906 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2slfz\" (UniqueName: \"kubernetes.io/projected/9988da93-f29c-4541-bf93-677fdfb61a35-kube-api-access-2slfz\") pod \"community-operators-9fctq\" (UID: \"9988da93-f29c-4541-bf93-677fdfb61a35\") " pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.688073 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/extract-content/0.log" Oct 11 05:03:38 crc kubenswrapper[4798]: I1011 05:03:38.763268 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:39 crc kubenswrapper[4798]: I1011 05:03:39.101885 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/extract-utilities/0.log" Oct 11 05:03:39 crc kubenswrapper[4798]: I1011 05:03:39.119951 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/extract-content/0.log" Oct 11 05:03:39 crc kubenswrapper[4798]: I1011 05:03:39.306215 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9fctq"] Oct 11 05:03:39 crc kubenswrapper[4798]: I1011 05:03:39.611950 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h4bsh_5315221c-e6ab-4c3f-9d74-5bf456a5b78e/registry-server/0.log" Oct 11 05:03:39 crc kubenswrapper[4798]: I1011 05:03:39.612546 4798 generic.go:334] "Generic (PLEG): container finished" podID="9988da93-f29c-4541-bf93-677fdfb61a35" containerID="71c45207ae90b48e8804d9297f23f24351ed3b353ba7ffdd8c3896f37f8de9a3" exitCode=0 Oct 11 05:03:39 crc kubenswrapper[4798]: I1011 05:03:39.612627 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9fctq" event={"ID":"9988da93-f29c-4541-bf93-677fdfb61a35","Type":"ContainerDied","Data":"71c45207ae90b48e8804d9297f23f24351ed3b353ba7ffdd8c3896f37f8de9a3"} Oct 11 05:03:39 crc kubenswrapper[4798]: I1011 05:03:39.612706 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9fctq" event={"ID":"9988da93-f29c-4541-bf93-677fdfb61a35","Type":"ContainerStarted","Data":"a4d425a5811b4cf2fcd24596db9c63efb99677b1037952a785062ffeffc2bab0"} Oct 11 05:03:40 crc kubenswrapper[4798]: I1011 05:03:40.624178 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9fctq" event={"ID":"9988da93-f29c-4541-bf93-677fdfb61a35","Type":"ContainerStarted","Data":"82663fb1cb0972a4811ebac9aad4f2ea9e2e653cb7df4c582bd2055dd6e88aba"} Oct 11 05:03:41 crc kubenswrapper[4798]: I1011 05:03:41.691854 4798 generic.go:334] "Generic (PLEG): container finished" podID="9988da93-f29c-4541-bf93-677fdfb61a35" containerID="82663fb1cb0972a4811ebac9aad4f2ea9e2e653cb7df4c582bd2055dd6e88aba" exitCode=0 Oct 11 
05:03:41 crc kubenswrapper[4798]: I1011 05:03:41.692517 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9fctq" event={"ID":"9988da93-f29c-4541-bf93-677fdfb61a35","Type":"ContainerDied","Data":"82663fb1cb0972a4811ebac9aad4f2ea9e2e653cb7df4c582bd2055dd6e88aba"} Oct 11 05:03:42 crc kubenswrapper[4798]: I1011 05:03:42.708703 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9fctq" event={"ID":"9988da93-f29c-4541-bf93-677fdfb61a35","Type":"ContainerStarted","Data":"3652cf6adabbe03284f80d92219cb05c90cc03571643f2772f5c1274ed832386"} Oct 11 05:03:42 crc kubenswrapper[4798]: I1011 05:03:42.739577 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9fctq" podStartSLOduration=2.202612329 podStartE2EDuration="4.739550065s" podCreationTimestamp="2025-10-11 05:03:38 +0000 UTC" firstStartedPulling="2025-10-11 05:03:39.614249673 +0000 UTC m=+4114.950539399" lastFinishedPulling="2025-10-11 05:03:42.151187439 +0000 UTC m=+4117.487477135" observedRunningTime="2025-10-11 05:03:42.736274789 +0000 UTC m=+4118.072564485" watchObservedRunningTime="2025-10-11 05:03:42.739550065 +0000 UTC m=+4118.075839761" Oct 11 05:03:48 crc kubenswrapper[4798]: I1011 05:03:48.763777 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:48 crc kubenswrapper[4798]: I1011 05:03:48.764479 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:48 crc kubenswrapper[4798]: I1011 05:03:48.857938 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:49 crc kubenswrapper[4798]: I1011 05:03:49.865549 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:50 crc kubenswrapper[4798]: I1011 05:03:50.118497 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9fctq"] Oct 11 05:03:51 crc kubenswrapper[4798]: I1011 05:03:51.819983 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9fctq" podUID="9988da93-f29c-4541-bf93-677fdfb61a35" containerName="registry-server" containerID="cri-o://3652cf6adabbe03284f80d92219cb05c90cc03571643f2772f5c1274ed832386" gracePeriod=2 Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.403814 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.407874 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9988da93-f29c-4541-bf93-677fdfb61a35-catalog-content\") pod \"9988da93-f29c-4541-bf93-677fdfb61a35\" (UID: \"9988da93-f29c-4541-bf93-677fdfb61a35\") " Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.408220 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2slfz\" (UniqueName: \"kubernetes.io/projected/9988da93-f29c-4541-bf93-677fdfb61a35-kube-api-access-2slfz\") pod \"9988da93-f29c-4541-bf93-677fdfb61a35\" (UID: \"9988da93-f29c-4541-bf93-677fdfb61a35\") " Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.408532 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9988da93-f29c-4541-bf93-677fdfb61a35-utilities\") pod \"9988da93-f29c-4541-bf93-677fdfb61a35\" (UID: \"9988da93-f29c-4541-bf93-677fdfb61a35\") " Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.409473 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9988da93-f29c-4541-bf93-677fdfb61a35-utilities" (OuterVolumeSpecName: "utilities") pod "9988da93-f29c-4541-bf93-677fdfb61a35" (UID: "9988da93-f29c-4541-bf93-677fdfb61a35"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.420231 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9988da93-f29c-4541-bf93-677fdfb61a35-kube-api-access-2slfz" (OuterVolumeSpecName: "kube-api-access-2slfz") pod "9988da93-f29c-4541-bf93-677fdfb61a35" (UID: "9988da93-f29c-4541-bf93-677fdfb61a35"). InnerVolumeSpecName "kube-api-access-2slfz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.499965 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9988da93-f29c-4541-bf93-677fdfb61a35-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9988da93-f29c-4541-bf93-677fdfb61a35" (UID: "9988da93-f29c-4541-bf93-677fdfb61a35"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.512338 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2slfz\" (UniqueName: \"kubernetes.io/projected/9988da93-f29c-4541-bf93-677fdfb61a35-kube-api-access-2slfz\") on node \"crc\" DevicePath \"\"" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.512412 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9988da93-f29c-4541-bf93-677fdfb61a35-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.512426 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9988da93-f29c-4541-bf93-677fdfb61a35-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.831272 4798 generic.go:334] "Generic (PLEG): container finished" podID="9988da93-f29c-4541-bf93-677fdfb61a35" containerID="3652cf6adabbe03284f80d92219cb05c90cc03571643f2772f5c1274ed832386" exitCode=0 Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.831347 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9fctq" event={"ID":"9988da93-f29c-4541-bf93-677fdfb61a35","Type":"ContainerDied","Data":"3652cf6adabbe03284f80d92219cb05c90cc03571643f2772f5c1274ed832386"} Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.831825 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9fctq" event={"ID":"9988da93-f29c-4541-bf93-677fdfb61a35","Type":"ContainerDied","Data":"a4d425a5811b4cf2fcd24596db9c63efb99677b1037952a785062ffeffc2bab0"} Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.831377 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9fctq" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.831853 4798 scope.go:117] "RemoveContainer" containerID="3652cf6adabbe03284f80d92219cb05c90cc03571643f2772f5c1274ed832386" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.850900 4798 scope.go:117] "RemoveContainer" containerID="82663fb1cb0972a4811ebac9aad4f2ea9e2e653cb7df4c582bd2055dd6e88aba" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.869868 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9fctq"] Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.877702 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9fctq"] Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.886032 4798 scope.go:117] "RemoveContainer" containerID="71c45207ae90b48e8804d9297f23f24351ed3b353ba7ffdd8c3896f37f8de9a3" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.926366 4798 scope.go:117] "RemoveContainer" containerID="3652cf6adabbe03284f80d92219cb05c90cc03571643f2772f5c1274ed832386" Oct 11 05:03:52 crc kubenswrapper[4798]: E1011 05:03:52.926778 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3652cf6adabbe03284f80d92219cb05c90cc03571643f2772f5c1274ed832386\": container with ID starting with 3652cf6adabbe03284f80d92219cb05c90cc03571643f2772f5c1274ed832386 not found: ID does not exist" containerID="3652cf6adabbe03284f80d92219cb05c90cc03571643f2772f5c1274ed832386" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.926815 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3652cf6adabbe03284f80d92219cb05c90cc03571643f2772f5c1274ed832386"} err="failed to get container status \"3652cf6adabbe03284f80d92219cb05c90cc03571643f2772f5c1274ed832386\": rpc error: code = NotFound desc = could not find container \"3652cf6adabbe03284f80d92219cb05c90cc03571643f2772f5c1274ed832386\": container with ID starting with 3652cf6adabbe03284f80d92219cb05c90cc03571643f2772f5c1274ed832386 not found: ID does not exist" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.926838 4798 scope.go:117] "RemoveContainer" containerID="82663fb1cb0972a4811ebac9aad4f2ea9e2e653cb7df4c582bd2055dd6e88aba" Oct 11 05:03:52 crc kubenswrapper[4798]: E1011 05:03:52.927093 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82663fb1cb0972a4811ebac9aad4f2ea9e2e653cb7df4c582bd2055dd6e88aba\": container with ID starting with 82663fb1cb0972a4811ebac9aad4f2ea9e2e653cb7df4c582bd2055dd6e88aba not found: ID does not exist" containerID="82663fb1cb0972a4811ebac9aad4f2ea9e2e653cb7df4c582bd2055dd6e88aba" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.927147 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82663fb1cb0972a4811ebac9aad4f2ea9e2e653cb7df4c582bd2055dd6e88aba"} err="failed to get container status \"82663fb1cb0972a4811ebac9aad4f2ea9e2e653cb7df4c582bd2055dd6e88aba\": rpc error: code = NotFound desc = could not find container \"82663fb1cb0972a4811ebac9aad4f2ea9e2e653cb7df4c582bd2055dd6e88aba\": container with ID starting with 82663fb1cb0972a4811ebac9aad4f2ea9e2e653cb7df4c582bd2055dd6e88aba not found: ID does not exist" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.927188 4798 scope.go:117] "RemoveContainer" 
containerID="71c45207ae90b48e8804d9297f23f24351ed3b353ba7ffdd8c3896f37f8de9a3" Oct 11 05:03:52 crc kubenswrapper[4798]: E1011 05:03:52.927732 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71c45207ae90b48e8804d9297f23f24351ed3b353ba7ffdd8c3896f37f8de9a3\": container with ID starting with 71c45207ae90b48e8804d9297f23f24351ed3b353ba7ffdd8c3896f37f8de9a3 not found: ID does not exist" containerID="71c45207ae90b48e8804d9297f23f24351ed3b353ba7ffdd8c3896f37f8de9a3" Oct 11 05:03:52 crc kubenswrapper[4798]: I1011 05:03:52.927769 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71c45207ae90b48e8804d9297f23f24351ed3b353ba7ffdd8c3896f37f8de9a3"} err="failed to get container status \"71c45207ae90b48e8804d9297f23f24351ed3b353ba7ffdd8c3896f37f8de9a3\": rpc error: code = NotFound desc = could not find container \"71c45207ae90b48e8804d9297f23f24351ed3b353ba7ffdd8c3896f37f8de9a3\": container with ID starting with 71c45207ae90b48e8804d9297f23f24351ed3b353ba7ffdd8c3896f37f8de9a3 not found: ID does not exist" Oct 11 05:03:53 crc kubenswrapper[4798]: I1011 05:03:53.437535 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9988da93-f29c-4541-bf93-677fdfb61a35" path="/var/lib/kubelet/pods/9988da93-f29c-4541-bf93-677fdfb61a35/volumes" Oct 11 05:04:27 crc kubenswrapper[4798]: I1011 05:04:27.138994 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 05:04:27 crc kubenswrapper[4798]: I1011 05:04:27.140153 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 05:04:57 crc kubenswrapper[4798]: I1011 05:04:57.138813 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 05:04:57 crc kubenswrapper[4798]: I1011 05:04:57.142695 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 05:05:01 crc kubenswrapper[4798]: I1011 05:05:01.100959 4798 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="2d1fad00-1405-4149-96d6-7ef60d34c4f1" containerName="galera" probeResult="failure" output="command timed out" Oct 11 05:05:01 crc kubenswrapper[4798]: I1011 05:05:01.105153 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="2d1fad00-1405-4149-96d6-7ef60d34c4f1" containerName="galera" probeResult="failure" output="command timed out" Oct 11 05:05:23 crc kubenswrapper[4798]: I1011 05:05:23.990460 4798 generic.go:334] "Generic (PLEG): container 
finished" podID="da22053c-d880-465d-85ed-7bea8aaa0b1a" containerID="81831b3536d759b1e26ee5bc6b4cf509b6bacfb86541be1fc3834f0babc7499b" exitCode=0 Oct 11 05:05:23 crc kubenswrapper[4798]: I1011 05:05:23.990634 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5cm9g/must-gather-fvqqz" event={"ID":"da22053c-d880-465d-85ed-7bea8aaa0b1a","Type":"ContainerDied","Data":"81831b3536d759b1e26ee5bc6b4cf509b6bacfb86541be1fc3834f0babc7499b"} Oct 11 05:05:23 crc kubenswrapper[4798]: I1011 05:05:23.992394 4798 scope.go:117] "RemoveContainer" containerID="81831b3536d759b1e26ee5bc6b4cf509b6bacfb86541be1fc3834f0babc7499b" Oct 11 05:05:24 crc kubenswrapper[4798]: I1011 05:05:24.912445 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-5cm9g_must-gather-fvqqz_da22053c-d880-465d-85ed-7bea8aaa0b1a/gather/0.log" Oct 11 05:05:27 crc kubenswrapper[4798]: I1011 05:05:27.138102 4798 patch_prober.go:28] interesting pod/machine-config-daemon-h28s2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 05:05:27 crc kubenswrapper[4798]: I1011 05:05:27.139105 4798 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 05:05:27 crc kubenswrapper[4798]: I1011 05:05:27.139564 4798 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" Oct 11 05:05:27 crc kubenswrapper[4798]: I1011 05:05:27.140174 4798 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385"} pod="openshift-machine-config-operator/machine-config-daemon-h28s2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 05:05:27 crc kubenswrapper[4798]: I1011 05:05:27.140240 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerName="machine-config-daemon" containerID="cri-o://d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385" gracePeriod=600 Oct 11 05:05:27 crc kubenswrapper[4798]: E1011 05:05:27.273877 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 05:05:28 crc kubenswrapper[4798]: I1011 05:05:28.040296 4798 generic.go:334] "Generic (PLEG): container finished" podID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" containerID="d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385" exitCode=0 Oct 11 05:05:28 crc kubenswrapper[4798]: I1011 05:05:28.040444 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-h28s2" event={"ID":"42571bc8-2186-4e3b-bba9-28f5a8f364d0","Type":"ContainerDied","Data":"d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385"} Oct 11 05:05:28 crc kubenswrapper[4798]: I1011 05:05:28.040885 4798 scope.go:117] "RemoveContainer" containerID="1547835e5b38aaac070151fdc3778d195594e548ec1f9e8578c50470ab453c91" Oct 11 05:05:28 crc kubenswrapper[4798]: I1011 05:05:28.042265 4798 scope.go:117] "RemoveContainer" containerID="d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385" Oct 11 05:05:28 crc kubenswrapper[4798]: E1011 05:05:28.042889 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 05:05:35 crc kubenswrapper[4798]: I1011 05:05:35.325504 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-5cm9g/must-gather-fvqqz"] Oct 11 05:05:35 crc kubenswrapper[4798]: I1011 05:05:35.326953 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-5cm9g/must-gather-fvqqz" podUID="da22053c-d880-465d-85ed-7bea8aaa0b1a" containerName="copy" containerID="cri-o://d4f804ec67817409ee0c6df0700ea9b5c09c3818487b56d1d607fe388e3d9799" gracePeriod=2 Oct 11 05:05:35 crc kubenswrapper[4798]: I1011 05:05:35.348170 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-5cm9g/must-gather-fvqqz"] Oct 11 05:05:35 crc kubenswrapper[4798]: I1011 05:05:35.813271 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-5cm9g_must-gather-fvqqz_da22053c-d880-465d-85ed-7bea8aaa0b1a/copy/0.log" Oct 11 05:05:35 crc kubenswrapper[4798]: I1011 05:05:35.814210 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5cm9g/must-gather-fvqqz" Oct 11 05:05:35 crc kubenswrapper[4798]: I1011 05:05:35.883780 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7lgc\" (UniqueName: \"kubernetes.io/projected/da22053c-d880-465d-85ed-7bea8aaa0b1a-kube-api-access-r7lgc\") pod \"da22053c-d880-465d-85ed-7bea8aaa0b1a\" (UID: \"da22053c-d880-465d-85ed-7bea8aaa0b1a\") " Oct 11 05:05:35 crc kubenswrapper[4798]: I1011 05:05:35.884135 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/da22053c-d880-465d-85ed-7bea8aaa0b1a-must-gather-output\") pod \"da22053c-d880-465d-85ed-7bea8aaa0b1a\" (UID: \"da22053c-d880-465d-85ed-7bea8aaa0b1a\") " Oct 11 05:05:35 crc kubenswrapper[4798]: I1011 05:05:35.892740 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da22053c-d880-465d-85ed-7bea8aaa0b1a-kube-api-access-r7lgc" (OuterVolumeSpecName: "kube-api-access-r7lgc") pod "da22053c-d880-465d-85ed-7bea8aaa0b1a" (UID: "da22053c-d880-465d-85ed-7bea8aaa0b1a"). InnerVolumeSpecName "kube-api-access-r7lgc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 05:05:35 crc kubenswrapper[4798]: I1011 05:05:35.987428 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7lgc\" (UniqueName: \"kubernetes.io/projected/da22053c-d880-465d-85ed-7bea8aaa0b1a-kube-api-access-r7lgc\") on node \"crc\" DevicePath \"\"" Oct 11 05:05:36 crc kubenswrapper[4798]: I1011 05:05:36.031314 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da22053c-d880-465d-85ed-7bea8aaa0b1a-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "da22053c-d880-465d-85ed-7bea8aaa0b1a" (UID: "da22053c-d880-465d-85ed-7bea8aaa0b1a"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 05:05:36 crc kubenswrapper[4798]: I1011 05:05:36.090627 4798 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/da22053c-d880-465d-85ed-7bea8aaa0b1a-must-gather-output\") on node \"crc\" DevicePath \"\"" Oct 11 05:05:36 crc kubenswrapper[4798]: I1011 05:05:36.138036 4798 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-5cm9g_must-gather-fvqqz_da22053c-d880-465d-85ed-7bea8aaa0b1a/copy/0.log" Oct 11 05:05:36 crc kubenswrapper[4798]: I1011 05:05:36.138705 4798 generic.go:334] "Generic (PLEG): container finished" podID="da22053c-d880-465d-85ed-7bea8aaa0b1a" containerID="d4f804ec67817409ee0c6df0700ea9b5c09c3818487b56d1d607fe388e3d9799" exitCode=143 Oct 11 05:05:36 crc kubenswrapper[4798]: I1011 05:05:36.138805 4798 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5cm9g/must-gather-fvqqz" Oct 11 05:05:36 crc kubenswrapper[4798]: I1011 05:05:36.138773 4798 scope.go:117] "RemoveContainer" containerID="d4f804ec67817409ee0c6df0700ea9b5c09c3818487b56d1d607fe388e3d9799" Oct 11 05:05:36 crc kubenswrapper[4798]: I1011 05:05:36.170074 4798 scope.go:117] "RemoveContainer" containerID="81831b3536d759b1e26ee5bc6b4cf509b6bacfb86541be1fc3834f0babc7499b" Oct 11 05:05:36 crc kubenswrapper[4798]: I1011 05:05:36.277580 4798 scope.go:117] "RemoveContainer" containerID="d4f804ec67817409ee0c6df0700ea9b5c09c3818487b56d1d607fe388e3d9799" Oct 11 05:05:36 crc kubenswrapper[4798]: E1011 05:05:36.278223 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4f804ec67817409ee0c6df0700ea9b5c09c3818487b56d1d607fe388e3d9799\": container with ID starting with d4f804ec67817409ee0c6df0700ea9b5c09c3818487b56d1d607fe388e3d9799 not found: ID does not exist" containerID="d4f804ec67817409ee0c6df0700ea9b5c09c3818487b56d1d607fe388e3d9799" Oct 11 05:05:36 crc kubenswrapper[4798]: I1011 05:05:36.278326 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4f804ec67817409ee0c6df0700ea9b5c09c3818487b56d1d607fe388e3d9799"} err="failed to get container status \"d4f804ec67817409ee0c6df0700ea9b5c09c3818487b56d1d607fe388e3d9799\": rpc error: code = NotFound desc = could not find container \"d4f804ec67817409ee0c6df0700ea9b5c09c3818487b56d1d607fe388e3d9799\": container with ID starting with d4f804ec67817409ee0c6df0700ea9b5c09c3818487b56d1d607fe388e3d9799 not found: ID does not exist" Oct 11 05:05:36 crc kubenswrapper[4798]: I1011 05:05:36.278461 4798 scope.go:117] "RemoveContainer" containerID="81831b3536d759b1e26ee5bc6b4cf509b6bacfb86541be1fc3834f0babc7499b" Oct 11 05:05:36 crc 
kubenswrapper[4798]: E1011 05:05:36.279016 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81831b3536d759b1e26ee5bc6b4cf509b6bacfb86541be1fc3834f0babc7499b\": container with ID starting with 81831b3536d759b1e26ee5bc6b4cf509b6bacfb86541be1fc3834f0babc7499b not found: ID does not exist" containerID="81831b3536d759b1e26ee5bc6b4cf509b6bacfb86541be1fc3834f0babc7499b" Oct 11 05:05:36 crc kubenswrapper[4798]: I1011 05:05:36.279110 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81831b3536d759b1e26ee5bc6b4cf509b6bacfb86541be1fc3834f0babc7499b"} err="failed to get container status \"81831b3536d759b1e26ee5bc6b4cf509b6bacfb86541be1fc3834f0babc7499b\": rpc error: code = NotFound desc = could not find container \"81831b3536d759b1e26ee5bc6b4cf509b6bacfb86541be1fc3834f0babc7499b\": container with ID starting with 81831b3536d759b1e26ee5bc6b4cf509b6bacfb86541be1fc3834f0babc7499b not found: ID does not exist" Oct 11 05:05:37 crc kubenswrapper[4798]: I1011 05:05:37.452697 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da22053c-d880-465d-85ed-7bea8aaa0b1a" path="/var/lib/kubelet/pods/da22053c-d880-465d-85ed-7bea8aaa0b1a/volumes" Oct 11 05:05:43 crc kubenswrapper[4798]: I1011 05:05:43.425559 4798 scope.go:117] "RemoveContainer" containerID="d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385" Oct 11 05:05:43 crc kubenswrapper[4798]: E1011 05:05:43.426981 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 05:05:58 crc kubenswrapper[4798]: I1011 05:05:58.425167 4798 scope.go:117] "RemoveContainer" containerID="d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385" Oct 11 05:05:58 crc kubenswrapper[4798]: E1011 05:05:58.426700 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 05:06:10 crc kubenswrapper[4798]: I1011 05:06:10.423597 4798 scope.go:117] "RemoveContainer" containerID="d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385" Oct 11 05:06:10 crc kubenswrapper[4798]: E1011 05:06:10.426195 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 05:06:21 crc kubenswrapper[4798]: I1011 05:06:21.425811 4798 scope.go:117] "RemoveContainer" containerID="d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385" Oct 11 05:06:21 crc kubenswrapper[4798]: E1011 05:06:21.428593 4798 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 05:06:32 crc kubenswrapper[4798]: I1011 05:06:32.425206 4798 scope.go:117] "RemoveContainer" containerID="d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385" Oct 11 05:06:32 crc kubenswrapper[4798]: E1011 05:06:32.426672 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 05:06:45 crc kubenswrapper[4798]: I1011 05:06:45.439207 4798 scope.go:117] "RemoveContainer" containerID="d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385" Oct 11 05:06:45 crc kubenswrapper[4798]: E1011 05:06:45.440694 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 05:06:53 crc kubenswrapper[4798]: I1011 05:06:53.984265 4798 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-p5dl7"] Oct 11 05:06:53 crc kubenswrapper[4798]: E1011 05:06:53.985378 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9988da93-f29c-4541-bf93-677fdfb61a35" containerName="extract-content" Oct 11 05:06:53 crc kubenswrapper[4798]: I1011 05:06:53.985415 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9988da93-f29c-4541-bf93-677fdfb61a35" containerName="extract-content" Oct 11 05:06:53 crc kubenswrapper[4798]: E1011 05:06:53.985434 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da22053c-d880-465d-85ed-7bea8aaa0b1a" containerName="copy" Oct 11 05:06:53 crc kubenswrapper[4798]: I1011 05:06:53.985442 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="da22053c-d880-465d-85ed-7bea8aaa0b1a" containerName="copy" Oct 11 05:06:53 crc kubenswrapper[4798]: E1011 05:06:53.985461 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9988da93-f29c-4541-bf93-677fdfb61a35" containerName="registry-server" Oct 11 05:06:53 crc kubenswrapper[4798]: I1011 05:06:53.985469 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9988da93-f29c-4541-bf93-677fdfb61a35" containerName="registry-server" Oct 11 05:06:53 crc kubenswrapper[4798]: E1011 05:06:53.985484 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9988da93-f29c-4541-bf93-677fdfb61a35" containerName="extract-utilities" Oct 11 05:06:53 crc kubenswrapper[4798]: I1011 05:06:53.985492 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="9988da93-f29c-4541-bf93-677fdfb61a35" containerName="extract-utilities" Oct 11 05:06:53 crc kubenswrapper[4798]: 
E1011 05:06:53.985518 4798 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da22053c-d880-465d-85ed-7bea8aaa0b1a" containerName="gather" Oct 11 05:06:53 crc kubenswrapper[4798]: I1011 05:06:53.985526 4798 state_mem.go:107] "Deleted CPUSet assignment" podUID="da22053c-d880-465d-85ed-7bea8aaa0b1a" containerName="gather" Oct 11 05:06:53 crc kubenswrapper[4798]: I1011 05:06:53.985783 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="da22053c-d880-465d-85ed-7bea8aaa0b1a" containerName="gather" Oct 11 05:06:53 crc kubenswrapper[4798]: I1011 05:06:53.985807 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="9988da93-f29c-4541-bf93-677fdfb61a35" containerName="registry-server" Oct 11 05:06:53 crc kubenswrapper[4798]: I1011 05:06:53.985819 4798 memory_manager.go:354] "RemoveStaleState removing state" podUID="da22053c-d880-465d-85ed-7bea8aaa0b1a" containerName="copy" Oct 11 05:06:53 crc kubenswrapper[4798]: I1011 05:06:53.987536 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:06:54 crc kubenswrapper[4798]: I1011 05:06:54.002820 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p5dl7"] Oct 11 05:06:54 crc kubenswrapper[4798]: I1011 05:06:54.126909 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-catalog-content\") pod \"certified-operators-p5dl7\" (UID: \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\") " pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:06:54 crc kubenswrapper[4798]: I1011 05:06:54.127514 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vq6xk\" (UniqueName: \"kubernetes.io/projected/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-kube-api-access-vq6xk\") pod \"certified-operators-p5dl7\" (UID: \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\") " pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:06:54 crc kubenswrapper[4798]: I1011 05:06:54.127590 4798 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-utilities\") pod \"certified-operators-p5dl7\" (UID: \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\") " pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:06:54 crc kubenswrapper[4798]: I1011 05:06:54.229683 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-catalog-content\") pod \"certified-operators-p5dl7\" (UID: \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\") " pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:06:54 crc kubenswrapper[4798]: I1011 05:06:54.229858 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vq6xk\" (UniqueName: \"kubernetes.io/projected/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-kube-api-access-vq6xk\") pod \"certified-operators-p5dl7\" (UID: \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\") " pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:06:54 crc kubenswrapper[4798]: I1011 05:06:54.229886 4798 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-utilities\") pod \"certified-operators-p5dl7\" (UID: \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\") " pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:06:54 crc kubenswrapper[4798]: I1011 05:06:54.230715 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-utilities\") pod \"certified-operators-p5dl7\" (UID: \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\") " pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:06:54 crc kubenswrapper[4798]: I1011 05:06:54.230714 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-catalog-content\") pod \"certified-operators-p5dl7\" (UID: \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\") " pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:06:54 crc kubenswrapper[4798]: I1011 05:06:54.254461 4798 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vq6xk\" (UniqueName: \"kubernetes.io/projected/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-kube-api-access-vq6xk\") pod \"certified-operators-p5dl7\" (UID: \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\") " pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:06:54 crc kubenswrapper[4798]: I1011 05:06:54.340888 4798 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:06:54 crc kubenswrapper[4798]: I1011 05:06:54.867478 4798 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p5dl7"] Oct 11 05:06:55 crc kubenswrapper[4798]: I1011 05:06:55.181767 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5dl7" event={"ID":"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2","Type":"ContainerStarted","Data":"c44a7ae9d3a50ec200bc3f8e280fc2320d990bad731168cd44365b70ad4b25e3"} Oct 11 05:06:55 crc kubenswrapper[4798]: I1011 05:06:55.182205 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5dl7" event={"ID":"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2","Type":"ContainerStarted","Data":"691a967f0bab417c7b8df21fce573a829ba4f56968917c7ecbec5717fcb96616"} Oct 11 05:06:56 crc kubenswrapper[4798]: I1011 05:06:56.194168 4798 generic.go:334] "Generic (PLEG): container finished" podID="7e363c0e-ac57-4593-9dd6-f2e7a6c40da2" containerID="c44a7ae9d3a50ec200bc3f8e280fc2320d990bad731168cd44365b70ad4b25e3" exitCode=0 Oct 11 05:06:56 crc kubenswrapper[4798]: I1011 05:06:56.194250 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5dl7" event={"ID":"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2","Type":"ContainerDied","Data":"c44a7ae9d3a50ec200bc3f8e280fc2320d990bad731168cd44365b70ad4b25e3"} Oct 11 05:06:57 crc kubenswrapper[4798]: I1011 05:06:57.208248 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5dl7" event={"ID":"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2","Type":"ContainerStarted","Data":"2cbf9cc37dec48b28b65934663287ac0f0620fd2b530a0ed33a0c55edaf4b80b"} Oct 11 05:06:58 crc kubenswrapper[4798]: I1011 05:06:58.222643 4798 generic.go:334] "Generic (PLEG): container finished" podID="7e363c0e-ac57-4593-9dd6-f2e7a6c40da2" containerID="2cbf9cc37dec48b28b65934663287ac0f0620fd2b530a0ed33a0c55edaf4b80b" exitCode=0 
Oct 11 05:06:58 crc kubenswrapper[4798]: I1011 05:06:58.222904 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5dl7" event={"ID":"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2","Type":"ContainerDied","Data":"2cbf9cc37dec48b28b65934663287ac0f0620fd2b530a0ed33a0c55edaf4b80b"} Oct 11 05:06:58 crc kubenswrapper[4798]: I1011 05:06:58.425316 4798 scope.go:117] "RemoveContainer" containerID="d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385" Oct 11 05:06:58 crc kubenswrapper[4798]: E1011 05:06:58.425791 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 05:07:00 crc kubenswrapper[4798]: I1011 05:07:00.245733 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5dl7" event={"ID":"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2","Type":"ContainerStarted","Data":"22f33da98d2eaa42c8e8c496f0f6d74a7cc8cb3a3bcd56c302bac82050f42e82"} Oct 11 05:07:00 crc kubenswrapper[4798]: I1011 05:07:00.280672 4798 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-p5dl7" podStartSLOduration=4.729065264 podStartE2EDuration="7.280650003s" podCreationTimestamp="2025-10-11 05:06:53 +0000 UTC" firstStartedPulling="2025-10-11 05:06:56.198973896 +0000 UTC m=+4311.535263622" lastFinishedPulling="2025-10-11 05:06:58.750558675 +0000 UTC m=+4314.086848361" observedRunningTime="2025-10-11 05:07:00.274547811 +0000 UTC m=+4315.610837507" watchObservedRunningTime="2025-10-11 05:07:00.280650003 +0000 UTC m=+4315.616939689" Oct 11 05:07:04 crc kubenswrapper[4798]: I1011 05:07:04.342244 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:07:04 crc kubenswrapper[4798]: I1011 05:07:04.342963 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:07:04 crc kubenswrapper[4798]: I1011 05:07:04.409263 4798 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:07:05 crc kubenswrapper[4798]: I1011 05:07:05.372088 4798 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:07:05 crc kubenswrapper[4798]: I1011 05:07:05.458346 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p5dl7"] Oct 11 05:07:07 crc kubenswrapper[4798]: I1011 05:07:07.325929 4798 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-p5dl7" podUID="7e363c0e-ac57-4593-9dd6-f2e7a6c40da2" containerName="registry-server" containerID="cri-o://22f33da98d2eaa42c8e8c496f0f6d74a7cc8cb3a3bcd56c302bac82050f42e82" gracePeriod=2 Oct 11 05:07:07 crc kubenswrapper[4798]: I1011 05:07:07.883582 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:07:07 crc kubenswrapper[4798]: I1011 05:07:07.973312 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-utilities\") pod \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\" (UID: \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\") " Oct 11 05:07:07 crc kubenswrapper[4798]: I1011 05:07:07.973447 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vq6xk\" (UniqueName: \"kubernetes.io/projected/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-kube-api-access-vq6xk\") pod \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\" (UID: \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\") " Oct 11 05:07:07 crc kubenswrapper[4798]: I1011 05:07:07.973525 4798 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-catalog-content\") pod \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\" (UID: \"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2\") " Oct 11 05:07:07 crc kubenswrapper[4798]: I1011 05:07:07.974572 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-utilities" (OuterVolumeSpecName: "utilities") pod "7e363c0e-ac57-4593-9dd6-f2e7a6c40da2" (UID: "7e363c0e-ac57-4593-9dd6-f2e7a6c40da2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 05:07:07 crc kubenswrapper[4798]: I1011 05:07:07.981720 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-kube-api-access-vq6xk" (OuterVolumeSpecName: "kube-api-access-vq6xk") pod "7e363c0e-ac57-4593-9dd6-f2e7a6c40da2" (UID: "7e363c0e-ac57-4593-9dd6-f2e7a6c40da2"). InnerVolumeSpecName "kube-api-access-vq6xk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.032141 4798 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7e363c0e-ac57-4593-9dd6-f2e7a6c40da2" (UID: "7e363c0e-ac57-4593-9dd6-f2e7a6c40da2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.077200 4798 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.077251 4798 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vq6xk\" (UniqueName: \"kubernetes.io/projected/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-kube-api-access-vq6xk\") on node \"crc\" DevicePath \"\"" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.077317 4798 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.341430 4798 generic.go:334] "Generic (PLEG): container finished" podID="7e363c0e-ac57-4593-9dd6-f2e7a6c40da2" containerID="22f33da98d2eaa42c8e8c496f0f6d74a7cc8cb3a3bcd56c302bac82050f42e82" exitCode=0 Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.341878 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5dl7" event={"ID":"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2","Type":"ContainerDied","Data":"22f33da98d2eaa42c8e8c496f0f6d74a7cc8cb3a3bcd56c302bac82050f42e82"} Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.341922 4798 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5dl7" event={"ID":"7e363c0e-ac57-4593-9dd6-f2e7a6c40da2","Type":"ContainerDied","Data":"691a967f0bab417c7b8df21fce573a829ba4f56968917c7ecbec5717fcb96616"} Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.341958 4798 scope.go:117] "RemoveContainer" containerID="22f33da98d2eaa42c8e8c496f0f6d74a7cc8cb3a3bcd56c302bac82050f42e82" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.342175 4798 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p5dl7" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.379923 4798 scope.go:117] "RemoveContainer" containerID="2cbf9cc37dec48b28b65934663287ac0f0620fd2b530a0ed33a0c55edaf4b80b" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.405906 4798 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p5dl7"] Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.432659 4798 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-p5dl7"] Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.434858 4798 scope.go:117] "RemoveContainer" containerID="c44a7ae9d3a50ec200bc3f8e280fc2320d990bad731168cd44365b70ad4b25e3" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.461546 4798 scope.go:117] "RemoveContainer" containerID="22f33da98d2eaa42c8e8c496f0f6d74a7cc8cb3a3bcd56c302bac82050f42e82" Oct 11 05:07:08 crc kubenswrapper[4798]: E1011 05:07:08.462172 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22f33da98d2eaa42c8e8c496f0f6d74a7cc8cb3a3bcd56c302bac82050f42e82\": container with ID starting with 22f33da98d2eaa42c8e8c496f0f6d74a7cc8cb3a3bcd56c302bac82050f42e82 not found: ID does not exist" containerID="22f33da98d2eaa42c8e8c496f0f6d74a7cc8cb3a3bcd56c302bac82050f42e82" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.462327 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22f33da98d2eaa42c8e8c496f0f6d74a7cc8cb3a3bcd56c302bac82050f42e82"} err="failed to get container status \"22f33da98d2eaa42c8e8c496f0f6d74a7cc8cb3a3bcd56c302bac82050f42e82\": rpc error: code = NotFound desc = could not find container \"22f33da98d2eaa42c8e8c496f0f6d74a7cc8cb3a3bcd56c302bac82050f42e82\": container with ID starting with 22f33da98d2eaa42c8e8c496f0f6d74a7cc8cb3a3bcd56c302bac82050f42e82 not found: ID does not exist" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.462382 4798 scope.go:117] "RemoveContainer" containerID="2cbf9cc37dec48b28b65934663287ac0f0620fd2b530a0ed33a0c55edaf4b80b" Oct 11 05:07:08 crc kubenswrapper[4798]: E1011 05:07:08.462868 4798 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2cbf9cc37dec48b28b65934663287ac0f0620fd2b530a0ed33a0c55edaf4b80b\": container with ID starting with 2cbf9cc37dec48b28b65934663287ac0f0620fd2b530a0ed33a0c55edaf4b80b not found: ID does not exist" containerID="2cbf9cc37dec48b28b65934663287ac0f0620fd2b530a0ed33a0c55edaf4b80b" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.462916 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2cbf9cc37dec48b28b65934663287ac0f0620fd2b530a0ed33a0c55edaf4b80b"} err="failed to get container status \"2cbf9cc37dec48b28b65934663287ac0f0620fd2b530a0ed33a0c55edaf4b80b\": rpc error: code = NotFound desc = could not find container \"2cbf9cc37dec48b28b65934663287ac0f0620fd2b530a0ed33a0c55edaf4b80b\": container with ID starting with 2cbf9cc37dec48b28b65934663287ac0f0620fd2b530a0ed33a0c55edaf4b80b not found: ID does not exist" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.462943 4798 scope.go:117] "RemoveContainer" containerID="c44a7ae9d3a50ec200bc3f8e280fc2320d990bad731168cd44365b70ad4b25e3" Oct 11 05:07:08 crc kubenswrapper[4798]: E1011 05:07:08.463837 4798 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c44a7ae9d3a50ec200bc3f8e280fc2320d990bad731168cd44365b70ad4b25e3\": container with ID starting with c44a7ae9d3a50ec200bc3f8e280fc2320d990bad731168cd44365b70ad4b25e3 not found: ID does not exist" containerID="c44a7ae9d3a50ec200bc3f8e280fc2320d990bad731168cd44365b70ad4b25e3" Oct 11 05:07:08 crc kubenswrapper[4798]: I1011 05:07:08.463885 4798 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c44a7ae9d3a50ec200bc3f8e280fc2320d990bad731168cd44365b70ad4b25e3"} err="failed to get container status \"c44a7ae9d3a50ec200bc3f8e280fc2320d990bad731168cd44365b70ad4b25e3\": rpc error: code = NotFound desc = could not find container \"c44a7ae9d3a50ec200bc3f8e280fc2320d990bad731168cd44365b70ad4b25e3\": container with ID starting with c44a7ae9d3a50ec200bc3f8e280fc2320d990bad731168cd44365b70ad4b25e3 not found: ID does not exist" Oct 11 05:07:09 crc kubenswrapper[4798]: I1011 05:07:09.438894 4798 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e363c0e-ac57-4593-9dd6-f2e7a6c40da2" path="/var/lib/kubelet/pods/7e363c0e-ac57-4593-9dd6-f2e7a6c40da2/volumes" Oct 11 05:07:11 crc kubenswrapper[4798]: I1011 05:07:11.426549 4798 scope.go:117] "RemoveContainer" containerID="d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385" Oct 11 05:07:11 crc kubenswrapper[4798]: E1011 05:07:11.427637 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" Oct 11 05:07:22 crc kubenswrapper[4798]: I1011 05:07:22.425478 4798 scope.go:117] "RemoveContainer" containerID="d07d7b07048403baa93d3c75c790ba19ef9dcde5c3340cac72f6e569b4729385" Oct 11 05:07:22 crc kubenswrapper[4798]: E1011 05:07:22.426373 4798 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h28s2_openshift-machine-config-operator(42571bc8-2186-4e3b-bba9-28f5a8f364d0)\"" pod="openshift-machine-config-operator/machine-config-daemon-h28s2" podUID="42571bc8-2186-4e3b-bba9-28f5a8f364d0" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515072363024024447 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015072363025017365 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015072352113016504 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015072352113015454 5ustar corecore